From 2864680f54d9f8977b990b5d939ad333fd3b94b9 Mon Sep 17 00:00:00 2001 From: Anton Kolesnikov Date: Tue, 13 Aug 2024 10:50:43 +0800 Subject: [PATCH] New architecture prototype (#3388) * add metastore API definition scratch * Add metastore API definition (#3391) * WIP: Add dummy metastore (#3394) * Add dummy metastore * Add metastore client (#3397) * experiment: new architecture deployment (#3401) * remove query-worker service for now * fix metastore args * enable persistence for metastore * WIP: Distribute profiles based on tenant id, service name and labels (#3400) * Distribute profiles to shards based on tenant/service_name/labels with no replication * Add retries in case of delivery issues (WIP) * Calculate shard for ingested profiles, send to ingesters in push request * Set replication factor and token count via config * Fix tests * Run make helm/check check/unstaged-changes * Run make reference-help * Simplify shard calculation * Add a metric for distributed bytes * Register metric * Revert undesired change * metastore bootstrap include self * fix ingester ring replication factor * delete helm workflows * wip: segments writer (#3405) * start working on segments writer add shards awaiting segment flush in requests upload blocks add some tracing * upload meta in case of metastore error * do not upload metadata to dlq * add some flags * skip some tests. fmt * skip e2e tests * maybe fix microservices_test.go. I have no idea what I'm doing * change partition selection * rm e2e yaml * fmt * add compaction planner API definition * rm unnecessary nested dirs * debug log head stats * more debug logs * fix skipping empty head * fix tests * pass shards * more debug logs * fix nil deref in ingester * more debug logs in segmentsWriter * start collecting some metrics for segments * hack: purge stale segments * hack: purge stale segments on the leader * more segment metrics * more segment metrics * more segment metrics * more segment metrics * make fmt * more segment metrics * fix panic caused by the metric with the same name * more segment metrics * more segment metrics * make fmt * decrease page buffer size * decrease page buffer size, tsdb buffer writer size * separate parquet write buffer size for segments and compacted blocks * separate index write buffer size for segments and compacted blocks * improve segment metrics - decrease cardinality by removing service as label * fix head metrics recreation via phlarectx usage ;( * try to pool newParquetProfileWriter * Revert "try to pool newParquetProfileWriter" This reverts commit d91e3f1a1e5476b49df2fd9d90715cf751679a97. 
* decrease tsdb index buffers * decrease tsdb index buffers * experiment: add query backend (#3404) * Add query-backend component * add generated code for connect * add mergers * block reader scratches * block reader updates * query frontend integration * better query planning * tests and fixes * improve API * profilestore: use single row group * profile_store: pool profile parquet writer * profiles parquet encoding: fix profile column count * segments: rewrite shards to flush independently * make fmt * segments: flush heads concurrently * segments: tmp debug log * segments: change wait duration metric buckets * add inmemory tsdb index writer * rm debug log * use inmemory index writer * remove FileWriter from inmem index * inmemory tsdb index writer: reuse buffers through pool * inmemory tsdb index writer: preallocate initial buffers * segment: concat files with preallocated buffers * experiment: query backend block reader (#3423) * simplify report merge handling * implement query context and tenant service section * implement LabelNames and LabelValues * bind tsdb query api * bind time series query api * better caching * bind stack trace api * implement tree query * fix offset shift * update helm chart * tune buffers * add block object size attribute * serve profile type query from metastore * tune grpc server config * segment: try to use memfs * Revert "segment: try to use memfs" This reverts commit 798bb9dd2c00eea80f8ebdf3d0bab0244120968b. * tune s3 http client Before getting too deep with a custom TLS VerifyConnection function, it makes sense to ensure that we reuse connections as much as possible * WIP: Compaction planning in metastore (#3427) * First iteration of compaction planning in metastore * Second iteration of compaction planning in metastore * Add GetCompactionJobs back * Create and persist jobs in the same transaction as blocks * Add simple logging for compaction planning * Fix bug * Remove unused proto message * Remove unused raft command type * Add a simple config for compaction planning * WIP (new architecture): Add compaction workers, Integrate with planner (#3430) * Add compaction workers, Integrate with planner (wip) * Fix test * add compaction-worker service * refactor compaction-worker out of metastore * prevent bootstrapping a single node cluster on silent DNS failures * Scope compaction planning to shard+tenant * Improve state handling for compaction job status updates * Remove import * Reduce parquet buffer size for compaction workers * Fix another case of compaction job state inconsistency * refactor block reader out * handle nil blocks more carefully * add job priority queue with lease expiration and fencing token * disable boltdb sync We only use it to make snapshots * extend raft handlers with the raft log command * Add basic compaction metrics * Improve job assignments and status update logic * Remove segment truncation command * Make compaction worker job capacity configurable * Fix concurrent map access * Fix metric names * segment: change segment duration from 1s to 500ms * update request_duration_seconds buckets * update request_duration_seconds buckets * add an explicit parameter that controls how many raft peers to expect * fix the explicit parameter that controls how many raft peers to expect * temporary revert temporary hack I'm reverting it temporarily to protect metastore from running out of memory * add some more metrics * add some pprof tags for easier visibility * add some pprof tags for easier visibility * add block merge draft * add block merge 
draft * update metrics buckets again * update metrics buckets again * Address minor consistency issues, improve naming, in-progress updates * increase boltdb InitialMmapSize * Improve metrics, logging * Decrease buffer sizes further, remove completed jobs * Scale up compaction workers and their concurrency * experiment: implement shard-aware series distribution (#3436); see the sketch after this list * tune boltdb snapshotting - increase initial mmap size - keep fewer entries in the raft log - trigger more frequently * compact job regardless of the block size * ingester & distributor scaling * update manifests * metastore ready check * make fmt * Revert "make fmt" This reverts commit 8a55391d9aeffebb8e2b80254fe3652aed54277e. * Revert "metastore ready check" This reverts commit 98b05da9810d0be1d338cfd493639b240b1c413c. * experiment: streaming compaction (#3438) * experiment: stream compaction * fix stream compaction * fix parquet footer optimization * Persist compaction job pre-queue * tune compaction-worker capacity * Fix bug where compaction jobs with level 1 and above are not created * Remove blocks older than 12 hours (instead of 30 minutes) * Fix deadlock when restoring compaction jobs * Add basic metrics for compaction workers * Load compacted blocks in metastore on restore * experimenting with object prefetch size * experimenting with object prefetch * trace read path * metastore readycheck * metastore readycheck * metastore readycheck. trigger rollout * metastore readycheck. trigger rollout * segments, include block id in errors * metastore: log addBlock error * segments: maybe fix retries * segments: maybe fix retries * segments: more debug logging * refactor query result aggregation * segments: more debug logging * segments: more debug logging * tune resource requests * tune compaction worker job capacity * fix time series step unit * Update golang version to 1.22.4 * enable grpc tracing for ingesters * expose /debug/requests * more debug logs * reset state when restoring from snapshot * Add debug logging * Persist job raft log index and lease expiry after assignment * Scale up a few components in the dev environment * more debug logs * metastore client: resolve addresses from endpointslice instead of dns * Update frontend_profile_types.go * metastore: add extra delay for readiness check * metastore: add extra delay for readiness check * metastore client: more debug log * fix compaction * stream statistics tracking: add HeavyKeeper implementation * Bump compaction worker resources * Bump compaction worker resources * improve read path load distribution * handle compaction synchronously * revert CI/CD changes * isolate experimental code * rollback initialization changes * rollback initialization changes * isolate distributor changes * isolate ingester changes * cleanup experimental code * remove large tsdb fixture copy * update cmd tests * revert Push API changes * cleanup dependencies * cleanup gitignore * fix reviewdog * go mod tidy * make generate * revert changes in tsdb/index.go * Revert "revert changes in tsdb/index.go" This reverts commit 2188cdee5fa5abcf88054ed78c2cbd70659c8d44. 
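The distribution commits above route profiles to shards keyed on tenant, service name, and labels. As a rough sketch of that keying only (this is not the PR's actual distributor code in pkg/experiment/distributor, and the names shardIndex and numShards are made up for the example), the following Go snippet derives a shard index from those fields with an FNV hash:

package main

import (
    "fmt"
    "hash/fnv"
    "sort"
)

// shardIndex derives a shard from tenant, service name and labels.
// Labels are hashed in sorted order so the result does not depend on
// map iteration order. Illustrative only; the real distributor may use
// a different hash and key layout.
func shardIndex(tenant, serviceName string, labels map[string]string, numShards uint32) uint32 {
    h := fnv.New64a()
    h.Write([]byte(tenant))
    h.Write([]byte{0})
    h.Write([]byte(serviceName))
    h.Write([]byte{0})
    keys := make([]string, 0, len(labels))
    for k := range labels {
        keys = append(keys, k)
    }
    sort.Strings(keys)
    for _, k := range keys {
        h.Write([]byte(k))
        h.Write([]byte{0})
        h.Write([]byte(labels[k]))
        h.Write([]byte{0})
    }
    return uint32(h.Sum64() % uint64(numShards))
}

func main() {
    shard := shardIndex("tenant-a", "ride-sharing-app",
        map[string]string{"region": "eu-west-1"}, 64)
    fmt.Println("profile routed to shard", shard)
}

A push request for a given series would then be sent to the ingesters owning that shard; per the commits above, the replication factor and token count are set via config.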
--------- Co-authored-by: Aleksandar Petrov <8142643+aleks-p@users.noreply.github.com> Co-authored-by: Tolya Korniltsev --- .github/workflows/test.yml | 29 +- .gitignore | 1 + .golangci.yml | 2 + api/compactor/v1/compactor.proto | 95 + api/gen/proto/go/compactor/v1/compactor.pb.go | 860 ++++ .../go/compactor/v1/compactor_vtproto.pb.go | 2212 ++++++++ .../compactorv1connect/compactor.connect.go | 147 + .../compactor.connect.mux.go | 32 + api/gen/proto/go/metastore/v1/metastore.pb.go | 799 +++ .../go/metastore/v1/metastore_vtproto.pb.go | 2296 +++++++++ .../metastorev1connect/metastore.connect.go | 173 + .../metastore.connect.mux.go | 37 + .../go/querybackend/v1/querybackend.pb.go | 1627 ++++++ .../v1/querybackend_vtproto.pb.go | 4500 +++++++++++++++++ .../querybackend.connect.go | 114 + .../querybackend.connect.mux.go | 27 + api/metastore/v1/metastore.proto | 71 + api/openapiv2/gen/phlare.swagger.json | 483 ++ api/querybackend/v1/querybackend.proto | 145 + go.mod | 21 +- go.sum | 25 +- go.work.sum | 43 +- pkg/experiment/compactor/compaction_worker.go | 297 ++ .../compactor/compaction_worker_metrics.go | 36 + .../distributor/distributor_series.go | 176 + .../distributor/distributor_sharding.go | 129 + .../singlereplica/singlereplica.go | 25 + pkg/experiment/ingester/loki/index/buf.go | 114 + pkg/experiment/ingester/loki/index/cmp.go | 76 + pkg/experiment/ingester/loki/index/index.go | 1999 ++++++++ .../ingester/loki/index/index_test.go | 597 +++ pkg/experiment/ingester/segment.go | 540 ++ pkg/experiment/ingester/segment_metrics.go | 117 + pkg/experiment/ingester/writer_offset.go | 48 + pkg/experiment/metastore/client/client.go | 103 + .../client/grpc_endpointslice_resolver.go | 202 + .../metastore/compactionpb/compaction.pb.go | 403 ++ .../metastore/compactionpb/compaction.proto | 47 + .../compactionpb/compaction_vtproto.pb.go | 620 +++ pkg/experiment/metastore/metastore.go | 279 + pkg/experiment/metastore/metastore_boltdb.go | 288 ++ .../metastore/metastore_bootstrap.go | 129 + .../metastore/metastore_compaction_planner.go | 258 + .../metastore/metastore_compaction_queue.go | 210 + .../metastore_compaction_queue_test.go | 71 + pkg/experiment/metastore/metastore_fsm.go | 218 + pkg/experiment/metastore/metastore_hack.go | 100 + pkg/experiment/metastore/metastore_metrics.go | 54 + .../metastore/metastore_readindex.go | 132 + pkg/experiment/metastore/metastore_state.go | 228 + .../metastore/metastore_state_add_block.go | 92 + .../metastore_state_poll_compaction_jobs.go | 342 ++ .../metastore/raftleader/raftleader.go | 133 + .../metastore/raftlogpb/raflog.pb.go | 292 ++ .../metastore/raftlogpb/raflog.proto | 21 + .../metastore/raftlogpb/raflog_vtproto.pb.go | 307 ++ pkg/experiment/querybackend/backend.go | 146 + .../querybackend/block/compaction.go | 606 +++ .../querybackend/block/compaction_test.go | 38 + .../querybackend/block/constants.go | 82 + pkg/experiment/querybackend/block/object.go | 243 + .../querybackend/block/section_profiles.go | 389 ++ .../querybackend/block/section_symbols.go | 24 + .../querybackend/block/section_tsdb.go | 49 + .../querybackend/block/tenant_service.go | 209 + .../querybackend/block/testdata/.gitignore | 1 + .../block/testdata/block-metas.json | 216 + .../01J2VJQPYDC160REPAD2VN88XN/block.bin | Bin 0 -> 22242 bytes .../01J2VJQRGBK8YFWVV8K1MPRRWM/block.bin | Bin 0 -> 17664 bytes .../01J2VJQRTMSCY4VDYBP5N4N5JK/block.bin | Bin 0 -> 21765 bytes .../01J2VJQTJ3PGF7KB39ARR1BX3Y/block.bin | Bin 0 -> 28169 bytes .../01J2VJQV544TF571FDSK2H692P/block.bin | Bin 0 -> 15785 
bytes .../01J2VJQX8DYHSEBK7BAQSCJBMG/block.bin | Bin 0 -> 27431 bytes .../01J2VJQYQVZTPZMMJKE7F2XC47/block.bin | Bin 0 -> 36655 bytes .../01J2VJQZPARDJQ779S1JMV0XQA/block.bin | Bin 0 -> 24273 bytes .../01J2VJR0R3NQS23SDADNA6XHCM/block.bin | Bin 0 -> 77958 bytes .../01J2VJR31PT3X4NDJC4Q2BHWQ1/block.bin | Bin 0 -> 19471 bytes pkg/experiment/querybackend/block/writer.go | 107 + pkg/experiment/querybackend/block_reader.go | 125 + pkg/experiment/querybackend/client/client.go | 73 + pkg/experiment/querybackend/query.go | 141 + .../querybackend/query_label_names.go | 100 + .../querybackend/query_label_values.go | 103 + .../querybackend/query_profile_entry.go | 90 + .../querybackend/query_series_labels.go | 94 + .../querybackend/query_time_series.go | 114 + pkg/experiment/querybackend/query_tree.go | 95 + .../querybackend/queryplan/query_plan.go | 349 ++ .../querybackend/queryplan/query_plan_test.go | 128 + .../querybackend/queryplan/testdata/plan.txt | 46 + .../querybackend/report_aggregator.go | 155 + pkg/experiment/queryfrontend/frontend_meta.go | 188 + .../queryfrontend/frontend_profile_types.go | 155 + ...ries.go => frontend_select_time_series.go} | 6 +- ...nd_series.go => frontend_series_labels.go} | 0 pkg/iter/iter.go | 8 + pkg/model/labels_merger.go | 101 + pkg/{iter => model}/profiles.go | 32 +- pkg/{iter => model}/profiles_test.go | 28 +- pkg/model/time_series.go | 200 + pkg/model/time_series_builder.go | 77 + .../{series.go => time_series_merger.go} | 28 +- ...ies_test.go => time_series_merger_test.go} | 0 pkg/model/time_series_test.go | 154 + pkg/model/tree.go | 50 +- pkg/model/tree_merger.go | 47 + .../providers/memory/bucket_client.go | 230 + pkg/objstore/read_only_file.go | 179 + pkg/objstore/reader.go | 45 + pkg/phlaredb/block_querier.go | 4 +- pkg/phlaredb/filter_profiles_bidi.go | 3 +- pkg/phlaredb/head.go | 17 + pkg/phlaredb/head_queriers.go | 23 +- pkg/phlaredb/querier.go | 1 + pkg/phlaredb/sample_merge.go | 79 +- pkg/phlaredb/symdb/block_reader.go | 85 +- pkg/phlaredb/tsdb/index/index.go | 21 +- pkg/phlaredb/tsdb/index/index_test.go | 16 +- pkg/phlaredb/tsdb/index/postings.go | 4 + pkg/phlaredb/tsdb/index/test_utils.go | 16 +- pkg/querier/querier.go | 158 +- pkg/querier/querier_test.go | 142 - pkg/querier/select_merge.go | 64 +- pkg/querier/select_merge_test.go | 7 +- pkg/util/bufferpool/pool.go | 98 + pkg/util/bufferpool/pool_test.go | 22 + pkg/util/health/health.go | 33 + pkg/util/http.go | 3 +- pkg/util/interceptor.go | 1 - pkg/util/recovery.go | 10 +- pkg/util/refctr/refctr.go | 35 +- 131 files changed, 27928 insertions(+), 587 deletions(-) create mode 100644 api/compactor/v1/compactor.proto create mode 100644 api/gen/proto/go/compactor/v1/compactor.pb.go create mode 100644 api/gen/proto/go/compactor/v1/compactor_vtproto.pb.go create mode 100644 api/gen/proto/go/compactor/v1/compactorv1connect/compactor.connect.go create mode 100644 api/gen/proto/go/compactor/v1/compactorv1connect/compactor.connect.mux.go create mode 100644 api/gen/proto/go/metastore/v1/metastore.pb.go create mode 100644 api/gen/proto/go/metastore/v1/metastore_vtproto.pb.go create mode 100644 api/gen/proto/go/metastore/v1/metastorev1connect/metastore.connect.go create mode 100644 api/gen/proto/go/metastore/v1/metastorev1connect/metastore.connect.mux.go create mode 100644 api/gen/proto/go/querybackend/v1/querybackend.pb.go create mode 100644 api/gen/proto/go/querybackend/v1/querybackend_vtproto.pb.go create mode 100644 api/gen/proto/go/querybackend/v1/querybackendv1connect/querybackend.connect.go create 
mode 100644 api/gen/proto/go/querybackend/v1/querybackendv1connect/querybackend.connect.mux.go create mode 100644 api/metastore/v1/metastore.proto create mode 100644 api/querybackend/v1/querybackend.proto create mode 100644 pkg/experiment/compactor/compaction_worker.go create mode 100644 pkg/experiment/compactor/compaction_worker_metrics.go create mode 100644 pkg/experiment/distributor/distributor_series.go create mode 100644 pkg/experiment/distributor/distributor_sharding.go create mode 100644 pkg/experiment/distributor/singlereplica/singlereplica.go create mode 100644 pkg/experiment/ingester/loki/index/buf.go create mode 100644 pkg/experiment/ingester/loki/index/cmp.go create mode 100644 pkg/experiment/ingester/loki/index/index.go create mode 100644 pkg/experiment/ingester/loki/index/index_test.go create mode 100644 pkg/experiment/ingester/segment.go create mode 100644 pkg/experiment/ingester/segment_metrics.go create mode 100644 pkg/experiment/ingester/writer_offset.go create mode 100644 pkg/experiment/metastore/client/client.go create mode 100644 pkg/experiment/metastore/client/grpc_endpointslice_resolver.go create mode 100644 pkg/experiment/metastore/compactionpb/compaction.pb.go create mode 100644 pkg/experiment/metastore/compactionpb/compaction.proto create mode 100644 pkg/experiment/metastore/compactionpb/compaction_vtproto.pb.go create mode 100644 pkg/experiment/metastore/metastore.go create mode 100644 pkg/experiment/metastore/metastore_boltdb.go create mode 100644 pkg/experiment/metastore/metastore_bootstrap.go create mode 100644 pkg/experiment/metastore/metastore_compaction_planner.go create mode 100644 pkg/experiment/metastore/metastore_compaction_queue.go create mode 100644 pkg/experiment/metastore/metastore_compaction_queue_test.go create mode 100644 pkg/experiment/metastore/metastore_fsm.go create mode 100644 pkg/experiment/metastore/metastore_hack.go create mode 100644 pkg/experiment/metastore/metastore_metrics.go create mode 100644 pkg/experiment/metastore/metastore_readindex.go create mode 100644 pkg/experiment/metastore/metastore_state.go create mode 100644 pkg/experiment/metastore/metastore_state_add_block.go create mode 100644 pkg/experiment/metastore/metastore_state_poll_compaction_jobs.go create mode 100644 pkg/experiment/metastore/raftleader/raftleader.go create mode 100644 pkg/experiment/metastore/raftlogpb/raflog.pb.go create mode 100644 pkg/experiment/metastore/raftlogpb/raflog.proto create mode 100644 pkg/experiment/metastore/raftlogpb/raflog_vtproto.pb.go create mode 100644 pkg/experiment/querybackend/backend.go create mode 100644 pkg/experiment/querybackend/block/compaction.go create mode 100644 pkg/experiment/querybackend/block/compaction_test.go create mode 100644 pkg/experiment/querybackend/block/constants.go create mode 100644 pkg/experiment/querybackend/block/object.go create mode 100644 pkg/experiment/querybackend/block/section_profiles.go create mode 100644 pkg/experiment/querybackend/block/section_symbols.go create mode 100644 pkg/experiment/querybackend/block/section_tsdb.go create mode 100644 pkg/experiment/querybackend/block/tenant_service.go create mode 100644 pkg/experiment/querybackend/block/testdata/.gitignore create mode 100644 pkg/experiment/querybackend/block/testdata/block-metas.json create mode 100644 pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQPYDC160REPAD2VN88XN/block.bin create mode 100644 pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQRGBK8YFWVV8K1MPRRWM/block.bin create mode 100644 
pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQRTMSCY4VDYBP5N4N5JK/block.bin create mode 100644 pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQTJ3PGF7KB39ARR1BX3Y/block.bin create mode 100644 pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQV544TF571FDSK2H692P/block.bin create mode 100644 pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQX8DYHSEBK7BAQSCJBMG/block.bin create mode 100644 pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQYQVZTPZMMJKE7F2XC47/block.bin create mode 100644 pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQZPARDJQ779S1JMV0XQA/block.bin create mode 100644 pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJR0R3NQS23SDADNA6XHCM/block.bin create mode 100644 pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJR31PT3X4NDJC4Q2BHWQ1/block.bin create mode 100644 pkg/experiment/querybackend/block/writer.go create mode 100644 pkg/experiment/querybackend/block_reader.go create mode 100644 pkg/experiment/querybackend/client/client.go create mode 100644 pkg/experiment/querybackend/query.go create mode 100644 pkg/experiment/querybackend/query_label_names.go create mode 100644 pkg/experiment/querybackend/query_label_values.go create mode 100644 pkg/experiment/querybackend/query_profile_entry.go create mode 100644 pkg/experiment/querybackend/query_series_labels.go create mode 100644 pkg/experiment/querybackend/query_time_series.go create mode 100644 pkg/experiment/querybackend/query_tree.go create mode 100644 pkg/experiment/querybackend/queryplan/query_plan.go create mode 100644 pkg/experiment/querybackend/queryplan/query_plan_test.go create mode 100644 pkg/experiment/querybackend/queryplan/testdata/plan.txt create mode 100644 pkg/experiment/querybackend/report_aggregator.go create mode 100644 pkg/experiment/queryfrontend/frontend_meta.go create mode 100644 pkg/experiment/queryfrontend/frontend_profile_types.go rename pkg/frontend/{frontend_select_series.go => frontend_select_time_series.go} (96%) rename pkg/frontend/{frontend_series.go => frontend_series_labels.go} (100%) create mode 100644 pkg/model/labels_merger.go rename pkg/{iter => model}/profiles.go (77%) rename pkg/{iter => model}/profiles_test.go (86%) create mode 100644 pkg/model/time_series.go create mode 100644 pkg/model/time_series_builder.go rename pkg/model/{series.go => time_series_merger.go} (71%) rename pkg/model/{series_test.go => time_series_merger_test.go} (100%) create mode 100644 pkg/model/time_series_test.go create mode 100644 pkg/model/tree_merger.go create mode 100644 pkg/objstore/providers/memory/bucket_client.go create mode 100644 pkg/objstore/read_only_file.go create mode 100644 pkg/util/bufferpool/pool.go create mode 100644 pkg/util/bufferpool/pool_test.go create mode 100644 pkg/util/health/health.go diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 9215438a97..4385f1886f 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -63,10 +63,34 @@ jobs: doc-validator: runs-on: "ubuntu-latest" container: - image: "grafana/doc-validator:v4.1.1" + image: "grafana/doc-validator:v5.1.0" steps: - name: "Checkout code" - uses: "actions/checkout@v3" + uses: "actions/checkout@v4" + # reviewdog is having issues with large diffs. + # The issue https://github.com/reviewdog/reviewdog/issues/1696 is not + # yet fully solved (as of reviewdog 0.17.4). 
+ # The workaround is to fetch PR head and merge base commits explicitly: + # Credits to https://github.com/grafana/deployment_tools/pull/162200. + # NB: fetch-depth=0 does not help (and is not recommended per se). + # TODO(kolesnikovae): Remove this workaround when the issue is fixed. + - name: Get merge commit between head SHA and base SHA + id: merge-commit + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + with: + script: | + const { data: { merge_base_commit } } = await github.rest.repos.compareCommitsWithBasehead({ + owner: context.repo.owner, + repo: context.repo.repo, + basehead: `${context.payload.pull_request.base.sha}...${context.payload.pull_request.head.sha}`, + }); + console.log(`Merge base commit: ${merge_base_commit.sha}`); + core.setOutput('merge-commit', merge_base_commit.sha); + - name: Fetch merge base and PR head + run: | + git config --system --add safe.directory '*' + git fetch --depth=1 origin "${{ steps.merge-commit.outputs.merge-commit }}" + git fetch --depth=1 origin "${{ github.event.pull_request.head.sha }}" - name: "Run doc-validator tool" run: > doc-validator @@ -81,6 +105,7 @@ jobs: --reporter=github-pr-review env: REVIEWDOG_GITHUB_API_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + REVIEWDOG_SKIP_GIT_FETCH: true build-image: if: github.event_name != 'push' diff --git a/.gitignore b/.gitignore index e04fd0a185..6ca9a4b5b1 100644 --- a/.gitignore +++ b/.gitignore @@ -15,6 +15,7 @@ pyroscope-sync/ data/ data-shared/ data-compactor/ +data-metastore/ .DS_Store **/dist diff --git a/.golangci.yml b/.golangci.yml index 7c43a84bbf..d050c0ae42 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -81,6 +81,8 @@ issues: exclude-dirs: - win_eventlog$ - pkg/og + - pkg/experiment + # which files to skip: they will be analyzed, but issues from them # won't be reported. Default value is empty list, but there is # no need to include all autogenerated files, we confidently recognize diff --git a/api/compactor/v1/compactor.proto b/api/compactor/v1/compactor.proto new file mode 100644 index 0000000000..513b0e7393 --- /dev/null +++ b/api/compactor/v1/compactor.proto @@ -0,0 +1,95 @@ +syntax = "proto3"; + +package compactor.v1; + +import "metastore/v1/metastore.proto"; + +service CompactionPlanner { + // Used to both retrieve jobs and update the jobs status at the same time. + rpc PollCompactionJobs(PollCompactionJobsRequest) returns (PollCompactionJobsResponse) {} + // Used for admin purposes only. + rpc GetCompactionJobs(GetCompactionRequest) returns (GetCompactionResponse) {} +} + +message PollCompactionJobsRequest { + // A batch of status updates for in-progress jobs from a worker. + repeated CompactionJobStatus job_status_updates = 1; + // How many new jobs a worker can be assigned to. + uint32 job_capacity = 2; +} + +message PollCompactionJobsResponse { + repeated CompactionJob compaction_jobs = 1; +} + +message GetCompactionRequest {} + +message GetCompactionResponse { + // A list of all compaction jobs + repeated CompactionJob compaction_jobs = 1; +} + +// One compaction job may result in multiple output blocks. +message CompactionJob { + // Unique name of the job. + string name = 1; + CompactionOptions options = 2; + // List of the input blocks. + repeated metastore.v1.BlockMeta blocks = 3; + CompactionJobStatus status = 4; + // Fencing token. + uint64 raft_log_index = 5; + // Shard the blocks belong to. + uint32 shard = 6; + // Optional, empty for compaction level 0. 
+ string tenant_id = 7; + uint32 compaction_level = 8; +} + +message CompactionOptions { + // Compaction planner should instruct the compactor + // worker how to compact the blocks: + // - Limits and tenant overrides. + // - Feature flags. + + // How often the compaction worker should update + // the job status. If overdue, the job ownership + // is revoked. + uint64 status_update_interval_seconds = 1; +} + +message CompactionJobStatus { + string job_name = 1; + // Status update allows the planner to keep + // track of the job ownership and compaction + // progress: + // - If the job status is other than IN_PROGRESS, + // the ownership of the job is revoked. + // - FAILURE must only be sent if the failure is + // persistent and the compaction can't be accomplished. + // - completed_job must be empty if the status is + // other than SUCCESS, and vice-versa. + // - UNSPECIFIED must be sent if the worker rejects + // or cancels the compaction job. + // + // Partial results/status is not allowed. + CompactionStatus status = 2; + CompletedJob completed_job = 3; + // Fencing token. + uint64 raft_log_index = 4; + // Shard the blocks belong to. + uint32 shard = 5; + // Optional, empty for compaction level 0. + string tenant_id = 6; +} + +enum CompactionStatus { + COMPACTION_STATUS_UNSPECIFIED = 0; + COMPACTION_STATUS_IN_PROGRESS = 1; + COMPACTION_STATUS_SUCCESS = 2; + COMPACTION_STATUS_FAILURE = 3; +} + +message CompletedJob { + repeated metastore.v1.BlockMeta blocks = 1; +} diff --git a/api/gen/proto/go/compactor/v1/compactor.pb.go b/api/gen/proto/go/compactor/v1/compactor.pb.go new file mode 100644 index 0000000000..a51f77a522 --- /dev/null +++ b/api/gen/proto/go/compactor/v1/compactor.pb.go @@ -0,0 +1,860 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc (unknown) +// source: compactor/v1/compactor.proto + +package compactorv1 + +import ( + v1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CompactionStatus int32 + +const ( + CompactionStatus_COMPACTION_STATUS_UNSPECIFIED CompactionStatus = 0 + CompactionStatus_COMPACTION_STATUS_IN_PROGRESS CompactionStatus = 1 + CompactionStatus_COMPACTION_STATUS_SUCCESS CompactionStatus = 2 + CompactionStatus_COMPACTION_STATUS_FAILURE CompactionStatus = 3 +) + +// Enum value maps for CompactionStatus. 
+var ( + CompactionStatus_name = map[int32]string{ + 0: "COMPACTION_STATUS_UNSPECIFIED", + 1: "COMPACTION_STATUS_IN_PROGRESS", + 2: "COMPACTION_STATUS_SUCCESS", + 3: "COMPACTION_STATUS_FAILURE", + } + CompactionStatus_value = map[string]int32{ + "COMPACTION_STATUS_UNSPECIFIED": 0, + "COMPACTION_STATUS_IN_PROGRESS": 1, + "COMPACTION_STATUS_SUCCESS": 2, + "COMPACTION_STATUS_FAILURE": 3, + } +) + +func (x CompactionStatus) Enum() *CompactionStatus { + p := new(CompactionStatus) + *p = x + return p +} + +func (x CompactionStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CompactionStatus) Descriptor() protoreflect.EnumDescriptor { + return file_compactor_v1_compactor_proto_enumTypes[0].Descriptor() +} + +func (CompactionStatus) Type() protoreflect.EnumType { + return &file_compactor_v1_compactor_proto_enumTypes[0] +} + +func (x CompactionStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CompactionStatus.Descriptor instead. +func (CompactionStatus) EnumDescriptor() ([]byte, []int) { + return file_compactor_v1_compactor_proto_rawDescGZIP(), []int{0} +} + +type PollCompactionJobsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A batch of status updates for in-progress jobs from a worker. + JobStatusUpdates []*CompactionJobStatus `protobuf:"bytes,1,rep,name=job_status_updates,json=jobStatusUpdates,proto3" json:"job_status_updates,omitempty"` + // How many new jobs a worker can be assigned to. + JobCapacity uint32 `protobuf:"varint,2,opt,name=job_capacity,json=jobCapacity,proto3" json:"job_capacity,omitempty"` +} + +func (x *PollCompactionJobsRequest) Reset() { + *x = PollCompactionJobsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_compactor_v1_compactor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PollCompactionJobsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PollCompactionJobsRequest) ProtoMessage() {} + +func (x *PollCompactionJobsRequest) ProtoReflect() protoreflect.Message { + mi := &file_compactor_v1_compactor_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PollCompactionJobsRequest.ProtoReflect.Descriptor instead. 
+func (*PollCompactionJobsRequest) Descriptor() ([]byte, []int) { + return file_compactor_v1_compactor_proto_rawDescGZIP(), []int{0} +} + +func (x *PollCompactionJobsRequest) GetJobStatusUpdates() []*CompactionJobStatus { + if x != nil { + return x.JobStatusUpdates + } + return nil +} + +func (x *PollCompactionJobsRequest) GetJobCapacity() uint32 { + if x != nil { + return x.JobCapacity + } + return 0 +} + +type PollCompactionJobsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CompactionJobs []*CompactionJob `protobuf:"bytes,1,rep,name=compaction_jobs,json=compactionJobs,proto3" json:"compaction_jobs,omitempty"` +} + +func (x *PollCompactionJobsResponse) Reset() { + *x = PollCompactionJobsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_compactor_v1_compactor_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PollCompactionJobsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PollCompactionJobsResponse) ProtoMessage() {} + +func (x *PollCompactionJobsResponse) ProtoReflect() protoreflect.Message { + mi := &file_compactor_v1_compactor_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PollCompactionJobsResponse.ProtoReflect.Descriptor instead. +func (*PollCompactionJobsResponse) Descriptor() ([]byte, []int) { + return file_compactor_v1_compactor_proto_rawDescGZIP(), []int{1} +} + +func (x *PollCompactionJobsResponse) GetCompactionJobs() []*CompactionJob { + if x != nil { + return x.CompactionJobs + } + return nil +} + +type GetCompactionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetCompactionRequest) Reset() { + *x = GetCompactionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_compactor_v1_compactor_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetCompactionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetCompactionRequest) ProtoMessage() {} + +func (x *GetCompactionRequest) ProtoReflect() protoreflect.Message { + mi := &file_compactor_v1_compactor_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetCompactionRequest.ProtoReflect.Descriptor instead. 
+func (*GetCompactionRequest) Descriptor() ([]byte, []int) { + return file_compactor_v1_compactor_proto_rawDescGZIP(), []int{2} +} + +type GetCompactionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of all compaction jobs + CompactionJobs []*CompactionJob `protobuf:"bytes,1,rep,name=compaction_jobs,json=compactionJobs,proto3" json:"compaction_jobs,omitempty"` +} + +func (x *GetCompactionResponse) Reset() { + *x = GetCompactionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_compactor_v1_compactor_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetCompactionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetCompactionResponse) ProtoMessage() {} + +func (x *GetCompactionResponse) ProtoReflect() protoreflect.Message { + mi := &file_compactor_v1_compactor_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetCompactionResponse.ProtoReflect.Descriptor instead. +func (*GetCompactionResponse) Descriptor() ([]byte, []int) { + return file_compactor_v1_compactor_proto_rawDescGZIP(), []int{3} +} + +func (x *GetCompactionResponse) GetCompactionJobs() []*CompactionJob { + if x != nil { + return x.CompactionJobs + } + return nil +} + +// One compaction job may result in multiple output blocks. +type CompactionJob struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unique name of the job. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Options *CompactionOptions `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` + // List of the input blocks. + Blocks []*v1.BlockMeta `protobuf:"bytes,3,rep,name=blocks,proto3" json:"blocks,omitempty"` + Status *CompactionJobStatus `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"` + // Fencing token. + RaftLogIndex uint64 `protobuf:"varint,5,opt,name=raft_log_index,json=raftLogIndex,proto3" json:"raft_log_index,omitempty"` + // Shard the blocks belong to. + Shard uint32 `protobuf:"varint,6,opt,name=shard,proto3" json:"shard,omitempty"` + // Optional, empty for compaction level 0. + TenantId string `protobuf:"bytes,7,opt,name=tenant_id,json=tenantId,proto3" json:"tenant_id,omitempty"` + CompactionLevel uint32 `protobuf:"varint,8,opt,name=compaction_level,json=compactionLevel,proto3" json:"compaction_level,omitempty"` +} + +func (x *CompactionJob) Reset() { + *x = CompactionJob{} + if protoimpl.UnsafeEnabled { + mi := &file_compactor_v1_compactor_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CompactionJob) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CompactionJob) ProtoMessage() {} + +func (x *CompactionJob) ProtoReflect() protoreflect.Message { + mi := &file_compactor_v1_compactor_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CompactionJob.ProtoReflect.Descriptor instead. 
+func (*CompactionJob) Descriptor() ([]byte, []int) { + return file_compactor_v1_compactor_proto_rawDescGZIP(), []int{4} +} + +func (x *CompactionJob) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CompactionJob) GetOptions() *CompactionOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *CompactionJob) GetBlocks() []*v1.BlockMeta { + if x != nil { + return x.Blocks + } + return nil +} + +func (x *CompactionJob) GetStatus() *CompactionJobStatus { + if x != nil { + return x.Status + } + return nil +} + +func (x *CompactionJob) GetRaftLogIndex() uint64 { + if x != nil { + return x.RaftLogIndex + } + return 0 +} + +func (x *CompactionJob) GetShard() uint32 { + if x != nil { + return x.Shard + } + return 0 +} + +func (x *CompactionJob) GetTenantId() string { + if x != nil { + return x.TenantId + } + return "" +} + +func (x *CompactionJob) GetCompactionLevel() uint32 { + if x != nil { + return x.CompactionLevel + } + return 0 +} + +type CompactionOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // How often the compaction worker should update + // the job status. If overdue, the job ownership + // is revoked. + StatusUpdateIntervalSeconds uint64 `protobuf:"varint,1,opt,name=status_update_interval_seconds,json=statusUpdateIntervalSeconds,proto3" json:"status_update_interval_seconds,omitempty"` +} + +func (x *CompactionOptions) Reset() { + *x = CompactionOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_compactor_v1_compactor_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CompactionOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CompactionOptions) ProtoMessage() {} + +func (x *CompactionOptions) ProtoReflect() protoreflect.Message { + mi := &file_compactor_v1_compactor_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CompactionOptions.ProtoReflect.Descriptor instead. +func (*CompactionOptions) Descriptor() ([]byte, []int) { + return file_compactor_v1_compactor_proto_rawDescGZIP(), []int{5} +} + +func (x *CompactionOptions) GetStatusUpdateIntervalSeconds() uint64 { + if x != nil { + return x.StatusUpdateIntervalSeconds + } + return 0 +} + +type CompactionJobStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + JobName string `protobuf:"bytes,1,opt,name=job_name,json=jobName,proto3" json:"job_name,omitempty"` + // Status update allows the planner to keep + // track of the job ownership and compaction + // progress: + // - If the job status is other than IN_PROGRESS, + // the ownership of the job is revoked. + // - FAILURE must only be sent if the failure is + // persistent and the compaction can't be accomplished. + // - completed_job must be empty if the status is + // other than SUCCESS, and vice-versa. + // - UNSPECIFIED must be sent if the worker rejects + // or cancels the compaction job. + // + // Partial results/status is not allowed. 
+ Status CompactionStatus `protobuf:"varint,2,opt,name=status,proto3,enum=compactor.v1.CompactionStatus" json:"status,omitempty"` + CompletedJob *CompletedJob `protobuf:"bytes,3,opt,name=completed_job,json=completedJob,proto3" json:"completed_job,omitempty"` + // Fencing token. + RaftLogIndex uint64 `protobuf:"varint,4,opt,name=raft_log_index,json=raftLogIndex,proto3" json:"raft_log_index,omitempty"` + // Shard the blocks belong to. + Shard uint32 `protobuf:"varint,5,opt,name=shard,proto3" json:"shard,omitempty"` + // Optional, empty for compaction level 0. + TenantId string `protobuf:"bytes,6,opt,name=tenant_id,json=tenantId,proto3" json:"tenant_id,omitempty"` +} + +func (x *CompactionJobStatus) Reset() { + *x = CompactionJobStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_compactor_v1_compactor_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CompactionJobStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CompactionJobStatus) ProtoMessage() {} + +func (x *CompactionJobStatus) ProtoReflect() protoreflect.Message { + mi := &file_compactor_v1_compactor_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CompactionJobStatus.ProtoReflect.Descriptor instead. +func (*CompactionJobStatus) Descriptor() ([]byte, []int) { + return file_compactor_v1_compactor_proto_rawDescGZIP(), []int{6} +} + +func (x *CompactionJobStatus) GetJobName() string { + if x != nil { + return x.JobName + } + return "" +} + +func (x *CompactionJobStatus) GetStatus() CompactionStatus { + if x != nil { + return x.Status + } + return CompactionStatus_COMPACTION_STATUS_UNSPECIFIED +} + +func (x *CompactionJobStatus) GetCompletedJob() *CompletedJob { + if x != nil { + return x.CompletedJob + } + return nil +} + +func (x *CompactionJobStatus) GetRaftLogIndex() uint64 { + if x != nil { + return x.RaftLogIndex + } + return 0 +} + +func (x *CompactionJobStatus) GetShard() uint32 { + if x != nil { + return x.Shard + } + return 0 +} + +func (x *CompactionJobStatus) GetTenantId() string { + if x != nil { + return x.TenantId + } + return "" +} + +type CompletedJob struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Blocks []*v1.BlockMeta `protobuf:"bytes,1,rep,name=blocks,proto3" json:"blocks,omitempty"` +} + +func (x *CompletedJob) Reset() { + *x = CompletedJob{} + if protoimpl.UnsafeEnabled { + mi := &file_compactor_v1_compactor_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CompletedJob) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CompletedJob) ProtoMessage() {} + +func (x *CompletedJob) ProtoReflect() protoreflect.Message { + mi := &file_compactor_v1_compactor_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CompletedJob.ProtoReflect.Descriptor instead. 
+func (*CompletedJob) Descriptor() ([]byte, []int) { + return file_compactor_v1_compactor_proto_rawDescGZIP(), []int{7} +} + +func (x *CompletedJob) GetBlocks() []*v1.BlockMeta { + if x != nil { + return x.Blocks + } + return nil +} + +var File_compactor_v1_compactor_proto protoreflect.FileDescriptor + +var file_compactor_v1_compactor_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x63, + 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, + 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x6d, 0x65, + 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8f, 0x01, 0x0a, 0x19, 0x50, + 0x6f, 0x6c, 0x6c, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4f, 0x0a, 0x12, 0x6a, 0x6f, 0x62, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x6f, 0x72, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, + 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x10, 0x6a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x6f, 0x62, + 0x5f, 0x63, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0b, 0x6a, 0x6f, 0x62, 0x43, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x22, 0x62, 0x0a, 0x1a, + 0x50, 0x6f, 0x6c, 0x6c, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, + 0x62, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x0f, 0x63, 0x6f, + 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6a, 0x6f, 0x62, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, + 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x73, + 0x22, 0x16, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x5d, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x43, + 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x44, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6a, 0x6f, 0x62, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x6f, 0x6d, + 0x70, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x73, 0x22, 0xce, 0x02, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, + 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, + 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2f, 0x0a, 0x06, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, + 0x61, 0x52, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x12, 0x39, 0x0a, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x6f, 0x6d, 0x70, + 0x61, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6c, 0x6f, 0x67, + 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x72, 0x61, + 0x66, 0x74, 0x4c, 0x6f, 0x67, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x29, 0x0a, + 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, + 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0x58, 0x0a, 0x11, 0x43, 0x6f, 0x6d, 0x70, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x43, 0x0a, + 0x1e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x1b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x6f, 0x6e, + 0x64, 0x73, 0x22, 0x82, 0x02, 0x0a, 0x13, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6a, 0x6f, + 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6a, 0x6f, + 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x6f, + 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3f, 0x0a, + 0x0d, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6a, 0x6f, 0x62, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x6f, 0x72, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4a, 0x6f, 0x62, + 0x52, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4a, 0x6f, 0x62, 0x12, 0x24, + 0x0a, 0x0e, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x72, 0x61, 0x66, 0x74, 0x4c, 0x6f, 0x67, 0x49, + 0x6e, 0x64, 0x65, 0x78, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1b, 0x0a, 
0x09, 0x74, 0x65, + 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x22, 0x3f, 0x0a, 0x0c, 0x43, 0x6f, 0x6d, 0x70, 0x6c, + 0x65, 0x74, 0x65, 0x64, 0x4a, 0x6f, 0x62, 0x12, 0x2f, 0x0a, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, + 0x52, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x2a, 0x96, 0x01, 0x0a, 0x10, 0x43, 0x6f, 0x6d, + 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x21, 0x0a, + 0x1d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, + 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, + 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x49, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x47, 0x52, 0x45, 0x53, + 0x53, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x43, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, + 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x19, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, + 0x03, 0x32, 0xde, 0x01, 0x0a, 0x11, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x12, 0x69, 0x0a, 0x12, 0x50, 0x6f, 0x6c, 0x6c, 0x43, + 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x73, 0x12, 0x27, 0x2e, + 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, + 0x6c, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, + 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x6c, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x5e, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x73, 0x12, 0x22, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, + 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x63, 0x6f, + 0x6d, 0x70, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6f, + 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x42, 0xbb, 0x01, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x61, + 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x61, 0x66, 0x61, 0x6e, 0x61, 0x2f, 0x70, 0x79, + 0x72, 0x6f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x67, 0x65, 0x6e, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, + 0x6f, 0x72, 0x2f, 0x76, 0x31, 0x3b, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x76, + 0x31, 
0xa2, 0x02, 0x03, 0x43, 0x58, 0x58, 0xaa, 0x02, 0x0c, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, + 0x74, 0x6f, 0x72, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x0c, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, + 0x6f, 0x72, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x18, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x6f, + 0x72, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0xea, 0x02, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x3a, 0x3a, 0x56, 0x31, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_compactor_v1_compactor_proto_rawDescOnce sync.Once + file_compactor_v1_compactor_proto_rawDescData = file_compactor_v1_compactor_proto_rawDesc +) + +func file_compactor_v1_compactor_proto_rawDescGZIP() []byte { + file_compactor_v1_compactor_proto_rawDescOnce.Do(func() { + file_compactor_v1_compactor_proto_rawDescData = protoimpl.X.CompressGZIP(file_compactor_v1_compactor_proto_rawDescData) + }) + return file_compactor_v1_compactor_proto_rawDescData +} + +var file_compactor_v1_compactor_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_compactor_v1_compactor_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_compactor_v1_compactor_proto_goTypes = []any{ + (CompactionStatus)(0), // 0: compactor.v1.CompactionStatus + (*PollCompactionJobsRequest)(nil), // 1: compactor.v1.PollCompactionJobsRequest + (*PollCompactionJobsResponse)(nil), // 2: compactor.v1.PollCompactionJobsResponse + (*GetCompactionRequest)(nil), // 3: compactor.v1.GetCompactionRequest + (*GetCompactionResponse)(nil), // 4: compactor.v1.GetCompactionResponse + (*CompactionJob)(nil), // 5: compactor.v1.CompactionJob + (*CompactionOptions)(nil), // 6: compactor.v1.CompactionOptions + (*CompactionJobStatus)(nil), // 7: compactor.v1.CompactionJobStatus + (*CompletedJob)(nil), // 8: compactor.v1.CompletedJob + (*v1.BlockMeta)(nil), // 9: metastore.v1.BlockMeta +} +var file_compactor_v1_compactor_proto_depIdxs = []int32{ + 7, // 0: compactor.v1.PollCompactionJobsRequest.job_status_updates:type_name -> compactor.v1.CompactionJobStatus + 5, // 1: compactor.v1.PollCompactionJobsResponse.compaction_jobs:type_name -> compactor.v1.CompactionJob + 5, // 2: compactor.v1.GetCompactionResponse.compaction_jobs:type_name -> compactor.v1.CompactionJob + 6, // 3: compactor.v1.CompactionJob.options:type_name -> compactor.v1.CompactionOptions + 9, // 4: compactor.v1.CompactionJob.blocks:type_name -> metastore.v1.BlockMeta + 7, // 5: compactor.v1.CompactionJob.status:type_name -> compactor.v1.CompactionJobStatus + 0, // 6: compactor.v1.CompactionJobStatus.status:type_name -> compactor.v1.CompactionStatus + 8, // 7: compactor.v1.CompactionJobStatus.completed_job:type_name -> compactor.v1.CompletedJob + 9, // 8: compactor.v1.CompletedJob.blocks:type_name -> metastore.v1.BlockMeta + 1, // 9: compactor.v1.CompactionPlanner.PollCompactionJobs:input_type -> compactor.v1.PollCompactionJobsRequest + 3, // 10: compactor.v1.CompactionPlanner.GetCompactionJobs:input_type -> compactor.v1.GetCompactionRequest + 2, // 11: compactor.v1.CompactionPlanner.PollCompactionJobs:output_type -> compactor.v1.PollCompactionJobsResponse + 4, // 12: compactor.v1.CompactionPlanner.GetCompactionJobs:output_type -> compactor.v1.GetCompactionResponse + 11, // [11:13] is the sub-list for method output_type + 9, // [9:11] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func 
init() { file_compactor_v1_compactor_proto_init() } +func file_compactor_v1_compactor_proto_init() { + if File_compactor_v1_compactor_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_compactor_v1_compactor_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*PollCompactionJobsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_compactor_v1_compactor_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*PollCompactionJobsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_compactor_v1_compactor_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*GetCompactionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_compactor_v1_compactor_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*GetCompactionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_compactor_v1_compactor_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*CompactionJob); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_compactor_v1_compactor_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*CompactionOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_compactor_v1_compactor_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*CompactionJobStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_compactor_v1_compactor_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*CompletedJob); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_compactor_v1_compactor_proto_rawDesc, + NumEnums: 1, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_compactor_v1_compactor_proto_goTypes, + DependencyIndexes: file_compactor_v1_compactor_proto_depIdxs, + EnumInfos: file_compactor_v1_compactor_proto_enumTypes, + MessageInfos: file_compactor_v1_compactor_proto_msgTypes, + }.Build() + File_compactor_v1_compactor_proto = out.File + file_compactor_v1_compactor_proto_rawDesc = nil + file_compactor_v1_compactor_proto_goTypes = nil + file_compactor_v1_compactor_proto_depIdxs = nil +} diff --git a/api/gen/proto/go/compactor/v1/compactor_vtproto.pb.go b/api/gen/proto/go/compactor/v1/compactor_vtproto.pb.go new file mode 100644 index 0000000000..6577c16531 --- /dev/null +++ b/api/gen/proto/go/compactor/v1/compactor_vtproto.pb.go @@ -0,0 +1,2212 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
+// protoc-gen-go-vtproto version: v0.6.0 +// source: compactor/v1/compactor.proto + +package compactorv1 + +import ( + context "context" + fmt "fmt" + v1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *PollCompactionJobsRequest) CloneVT() *PollCompactionJobsRequest { + if m == nil { + return (*PollCompactionJobsRequest)(nil) + } + r := new(PollCompactionJobsRequest) + r.JobCapacity = m.JobCapacity + if rhs := m.JobStatusUpdates; rhs != nil { + tmpContainer := make([]*CompactionJobStatus, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.JobStatusUpdates = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *PollCompactionJobsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PollCompactionJobsResponse) CloneVT() *PollCompactionJobsResponse { + if m == nil { + return (*PollCompactionJobsResponse)(nil) + } + r := new(PollCompactionJobsResponse) + if rhs := m.CompactionJobs; rhs != nil { + tmpContainer := make([]*CompactionJob, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.CompactionJobs = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *PollCompactionJobsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetCompactionRequest) CloneVT() *GetCompactionRequest { + if m == nil { + return (*GetCompactionRequest)(nil) + } + r := new(GetCompactionRequest) + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetCompactionRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetCompactionResponse) CloneVT() *GetCompactionResponse { + if m == nil { + return (*GetCompactionResponse)(nil) + } + r := new(GetCompactionResponse) + if rhs := m.CompactionJobs; rhs != nil { + tmpContainer := make([]*CompactionJob, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.CompactionJobs = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetCompactionResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CompactionJob) CloneVT() *CompactionJob { + if m == nil { + return (*CompactionJob)(nil) + } + r := new(CompactionJob) + r.Name = m.Name + r.Options = m.Options.CloneVT() + r.Status = m.Status.CloneVT() + r.RaftLogIndex = m.RaftLogIndex + r.Shard = m.Shard + r.TenantId = m.TenantId + r.CompactionLevel = m.CompactionLevel + if rhs := m.Blocks; rhs != nil { + tmpContainer := make([]*v1.BlockMeta, len(rhs)) + for k, v := range rhs { + if vtpb, ok := interface{}(v).(interface{ CloneVT() *v1.BlockMeta }); ok 
{ + tmpContainer[k] = vtpb.CloneVT() + } else { + tmpContainer[k] = proto.Clone(v).(*v1.BlockMeta) + } + } + r.Blocks = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CompactionJob) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CompactionOptions) CloneVT() *CompactionOptions { + if m == nil { + return (*CompactionOptions)(nil) + } + r := new(CompactionOptions) + r.StatusUpdateIntervalSeconds = m.StatusUpdateIntervalSeconds + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CompactionOptions) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CompactionJobStatus) CloneVT() *CompactionJobStatus { + if m == nil { + return (*CompactionJobStatus)(nil) + } + r := new(CompactionJobStatus) + r.JobName = m.JobName + r.Status = m.Status + r.CompletedJob = m.CompletedJob.CloneVT() + r.RaftLogIndex = m.RaftLogIndex + r.Shard = m.Shard + r.TenantId = m.TenantId + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CompactionJobStatus) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CompletedJob) CloneVT() *CompletedJob { + if m == nil { + return (*CompletedJob)(nil) + } + r := new(CompletedJob) + if rhs := m.Blocks; rhs != nil { + tmpContainer := make([]*v1.BlockMeta, len(rhs)) + for k, v := range rhs { + if vtpb, ok := interface{}(v).(interface{ CloneVT() *v1.BlockMeta }); ok { + tmpContainer[k] = vtpb.CloneVT() + } else { + tmpContainer[k] = proto.Clone(v).(*v1.BlockMeta) + } + } + r.Blocks = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CompletedJob) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (this *PollCompactionJobsRequest) EqualVT(that *PollCompactionJobsRequest) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if len(this.JobStatusUpdates) != len(that.JobStatusUpdates) { + return false + } + for i, vx := range this.JobStatusUpdates { + vy := that.JobStatusUpdates[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &CompactionJobStatus{} + } + if q == nil { + q = &CompactionJobStatus{} + } + if !p.EqualVT(q) { + return false + } + } + } + if this.JobCapacity != that.JobCapacity { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *PollCompactionJobsRequest) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*PollCompactionJobsRequest) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *PollCompactionJobsResponse) EqualVT(that *PollCompactionJobsResponse) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if len(this.CompactionJobs) != len(that.CompactionJobs) { + return false + } + for i, vx := range this.CompactionJobs { + vy := that.CompactionJobs[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &CompactionJob{} + } + if q == nil { + q = &CompactionJob{} + } + if !p.EqualVT(q) { + return false + } + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *PollCompactionJobsResponse) EqualMessageVT(thatMsg proto.Message) 
bool { + that, ok := thatMsg.(*PollCompactionJobsResponse) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *GetCompactionRequest) EqualVT(that *GetCompactionRequest) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *GetCompactionRequest) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*GetCompactionRequest) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *GetCompactionResponse) EqualVT(that *GetCompactionResponse) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if len(this.CompactionJobs) != len(that.CompactionJobs) { + return false + } + for i, vx := range this.CompactionJobs { + vy := that.CompactionJobs[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &CompactionJob{} + } + if q == nil { + q = &CompactionJob{} + } + if !p.EqualVT(q) { + return false + } + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *GetCompactionResponse) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*GetCompactionResponse) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *CompactionJob) EqualVT(that *CompactionJob) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Name != that.Name { + return false + } + if !this.Options.EqualVT(that.Options) { + return false + } + if len(this.Blocks) != len(that.Blocks) { + return false + } + for i, vx := range this.Blocks { + vy := that.Blocks[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &v1.BlockMeta{} + } + if q == nil { + q = &v1.BlockMeta{} + } + if equal, ok := interface{}(p).(interface{ EqualVT(*v1.BlockMeta) bool }); ok { + if !equal.EqualVT(q) { + return false + } + } else if !proto.Equal(p, q) { + return false + } + } + } + if !this.Status.EqualVT(that.Status) { + return false + } + if this.RaftLogIndex != that.RaftLogIndex { + return false + } + if this.Shard != that.Shard { + return false + } + if this.TenantId != that.TenantId { + return false + } + if this.CompactionLevel != that.CompactionLevel { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *CompactionJob) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*CompactionJob) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *CompactionOptions) EqualVT(that *CompactionOptions) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.StatusUpdateIntervalSeconds != that.StatusUpdateIntervalSeconds { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *CompactionOptions) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*CompactionOptions) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *CompactionJobStatus) EqualVT(that *CompactionJobStatus) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.JobName != that.JobName { + return false + } + if this.Status != that.Status { + return false + } + if !this.CompletedJob.EqualVT(that.CompletedJob) { + return false + } + if this.RaftLogIndex != that.RaftLogIndex { + return false + } + if this.Shard != that.Shard { + return false + } + if this.TenantId != that.TenantId 
{ + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *CompactionJobStatus) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*CompactionJobStatus) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *CompletedJob) EqualVT(that *CompletedJob) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if len(this.Blocks) != len(that.Blocks) { + return false + } + for i, vx := range this.Blocks { + vy := that.Blocks[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &v1.BlockMeta{} + } + if q == nil { + q = &v1.BlockMeta{} + } + if equal, ok := interface{}(p).(interface{ EqualVT(*v1.BlockMeta) bool }); ok { + if !equal.EqualVT(q) { + return false + } + } else if !proto.Equal(p, q) { + return false + } + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *CompletedJob) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*CompletedJob) + if !ok { + return false + } + return this.EqualVT(that) +} + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// CompactionPlannerClient is the client API for CompactionPlanner service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type CompactionPlannerClient interface { + // Used to both retrieve jobs and update the jobs status at the same time. + PollCompactionJobs(ctx context.Context, in *PollCompactionJobsRequest, opts ...grpc.CallOption) (*PollCompactionJobsResponse, error) + // Used for admin purposes only. + GetCompactionJobs(ctx context.Context, in *GetCompactionRequest, opts ...grpc.CallOption) (*GetCompactionResponse, error) +} + +type compactionPlannerClient struct { + cc grpc.ClientConnInterface +} + +func NewCompactionPlannerClient(cc grpc.ClientConnInterface) CompactionPlannerClient { + return &compactionPlannerClient{cc} +} + +func (c *compactionPlannerClient) PollCompactionJobs(ctx context.Context, in *PollCompactionJobsRequest, opts ...grpc.CallOption) (*PollCompactionJobsResponse, error) { + out := new(PollCompactionJobsResponse) + err := c.cc.Invoke(ctx, "/compactor.v1.CompactionPlanner/PollCompactionJobs", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *compactionPlannerClient) GetCompactionJobs(ctx context.Context, in *GetCompactionRequest, opts ...grpc.CallOption) (*GetCompactionResponse, error) { + out := new(GetCompactionResponse) + err := c.cc.Invoke(ctx, "/compactor.v1.CompactionPlanner/GetCompactionJobs", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// CompactionPlannerServer is the server API for CompactionPlanner service. +// All implementations must embed UnimplementedCompactionPlannerServer +// for forward compatibility +type CompactionPlannerServer interface { + // Used to both retrieve jobs and update the jobs status at the same time. + PollCompactionJobs(context.Context, *PollCompactionJobsRequest) (*PollCompactionJobsResponse, error) + // Used for admin purposes only. 
+ GetCompactionJobs(context.Context, *GetCompactionRequest) (*GetCompactionResponse, error) + mustEmbedUnimplementedCompactionPlannerServer() +} + +// UnimplementedCompactionPlannerServer must be embedded to have forward compatible implementations. +type UnimplementedCompactionPlannerServer struct { +} + +func (UnimplementedCompactionPlannerServer) PollCompactionJobs(context.Context, *PollCompactionJobsRequest) (*PollCompactionJobsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PollCompactionJobs not implemented") +} +func (UnimplementedCompactionPlannerServer) GetCompactionJobs(context.Context, *GetCompactionRequest) (*GetCompactionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetCompactionJobs not implemented") +} +func (UnimplementedCompactionPlannerServer) mustEmbedUnimplementedCompactionPlannerServer() {} + +// UnsafeCompactionPlannerServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to CompactionPlannerServer will +// result in compilation errors. +type UnsafeCompactionPlannerServer interface { + mustEmbedUnimplementedCompactionPlannerServer() +} + +func RegisterCompactionPlannerServer(s grpc.ServiceRegistrar, srv CompactionPlannerServer) { + s.RegisterService(&CompactionPlanner_ServiceDesc, srv) +} + +func _CompactionPlanner_PollCompactionJobs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PollCompactionJobsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CompactionPlannerServer).PollCompactionJobs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/compactor.v1.CompactionPlanner/PollCompactionJobs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CompactionPlannerServer).PollCompactionJobs(ctx, req.(*PollCompactionJobsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CompactionPlanner_GetCompactionJobs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetCompactionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CompactionPlannerServer).GetCompactionJobs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/compactor.v1.CompactionPlanner/GetCompactionJobs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CompactionPlannerServer).GetCompactionJobs(ctx, req.(*GetCompactionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// CompactionPlanner_ServiceDesc is the grpc.ServiceDesc for CompactionPlanner service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var CompactionPlanner_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "compactor.v1.CompactionPlanner", + HandlerType: (*CompactionPlannerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "PollCompactionJobs", + Handler: _CompactionPlanner_PollCompactionJobs_Handler, + }, + { + MethodName: "GetCompactionJobs", + Handler: _CompactionPlanner_GetCompactionJobs_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "compactor/v1/compactor.proto", +} + +func (m *PollCompactionJobsRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PollCompactionJobsRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PollCompactionJobsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.JobCapacity != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.JobCapacity)) + i-- + dAtA[i] = 0x10 + } + if len(m.JobStatusUpdates) > 0 { + for iNdEx := len(m.JobStatusUpdates) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.JobStatusUpdates[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PollCompactionJobsResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PollCompactionJobsResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PollCompactionJobsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CompactionJobs) > 0 { + for iNdEx := len(m.CompactionJobs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.CompactionJobs[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GetCompactionRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetCompactionRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetCompactionRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *GetCompactionResponse) MarshalVT() (dAtA 
[]byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetCompactionResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetCompactionResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CompactionJobs) > 0 { + for iNdEx := len(m.CompactionJobs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.CompactionJobs[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CompactionJob) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompactionJob) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CompactionJob) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.CompactionLevel != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CompactionLevel)) + i-- + dAtA[i] = 0x40 + } + if len(m.TenantId) > 0 { + i -= len(m.TenantId) + copy(dAtA[i:], m.TenantId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TenantId))) + i-- + dAtA[i] = 0x3a + } + if m.Shard != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Shard)) + i-- + dAtA[i] = 0x30 + } + if m.RaftLogIndex != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RaftLogIndex)) + i-- + dAtA[i] = 0x28 + } + if m.Status != nil { + size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.Blocks) > 0 { + for iNdEx := len(m.Blocks) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Blocks[iNdEx]).(interface { + MarshalToSizedBufferVT([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Blocks[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Options != nil { + size, err := m.Options.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CompactionOptions) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompactionOptions) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CompactionOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.StatusUpdateIntervalSeconds != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.StatusUpdateIntervalSeconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CompactionJobStatus) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompactionJobStatus) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CompactionJobStatus) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TenantId) > 0 { + i -= len(m.TenantId) + copy(dAtA[i:], m.TenantId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TenantId))) + i-- + dAtA[i] = 0x32 + } + if m.Shard != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Shard)) + i-- + dAtA[i] = 0x28 + } + if m.RaftLogIndex != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RaftLogIndex)) + i-- + dAtA[i] = 0x20 + } + if m.CompletedJob != nil { + size, err := m.CompletedJob.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.Status != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x10 + } + if len(m.JobName) > 0 { + i -= len(m.JobName) + copy(dAtA[i:], m.JobName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.JobName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CompletedJob) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompletedJob) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CompletedJob) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Blocks) > 0 { + for iNdEx := len(m.Blocks) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Blocks[iNdEx]).(interface { + MarshalToSizedBufferVT([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Blocks[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + } + return 
len(dAtA) - i, nil +} + +func (m *PollCompactionJobsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.JobStatusUpdates) > 0 { + for _, e := range m.JobStatusUpdates { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.JobCapacity != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.JobCapacity)) + } + n += len(m.unknownFields) + return n +} + +func (m *PollCompactionJobsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.CompactionJobs) > 0 { + for _, e := range m.CompactionJobs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetCompactionRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetCompactionResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.CompactionJobs) > 0 { + for _, e := range m.CompactionJobs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *CompactionJob) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Options != nil { + l = m.Options.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Blocks) > 0 { + for _, e := range m.Blocks { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Status != nil { + l = m.Status.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RaftLogIndex != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.RaftLogIndex)) + } + if m.Shard != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Shard)) + } + l = len(m.TenantId) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CompactionLevel != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.CompactionLevel)) + } + n += len(m.unknownFields) + return n +} + +func (m *CompactionOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StatusUpdateIntervalSeconds != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.StatusUpdateIntervalSeconds)) + } + n += len(m.unknownFields) + return n +} + +func (m *CompactionJobStatus) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.JobName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Status != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Status)) + } + if m.CompletedJob != nil { + l = m.CompletedJob.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.RaftLogIndex != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.RaftLogIndex)) + } + if m.Shard != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Shard)) + } + l = len(m.TenantId) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CompletedJob) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Blocks) > 0 { + for _, e := range m.Blocks { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m 
*PollCompactionJobsRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PollCompactionJobsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PollCompactionJobsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobStatusUpdates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JobStatusUpdates = append(m.JobStatusUpdates, &CompactionJobStatus{}) + if err := m.JobStatusUpdates[len(m.JobStatusUpdates)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field JobCapacity", wireType) + } + m.JobCapacity = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.JobCapacity |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PollCompactionJobsResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PollCompactionJobsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PollCompactionJobsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactionJobs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CompactionJobs = append(m.CompactionJobs, &CompactionJob{}) + if err := m.CompactionJobs[len(m.CompactionJobs)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCompactionRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCompactionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCompactionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCompactionResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCompactionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCompactionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactionJobs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CompactionJobs = append(m.CompactionJobs, &CompactionJob{}) + if err := m.CompactionJobs[len(m.CompactionJobs)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompactionJob) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompactionJob: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompactionJob: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &CompactionOptions{} + } + if err := m.Options.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Blocks = append(m.Blocks, &v1.BlockMeta{}) + if unmarshal, ok := interface{}(m.Blocks[len(m.Blocks)-1]).(interface { + UnmarshalVT([]byte) error + }); ok { + if err := unmarshal.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + if err := proto.Unmarshal(dAtA[iNdEx:postIndex], m.Blocks[len(m.Blocks)-1]); err != nil { + return err + } + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if 
b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &CompactionJobStatus{} + } + if err := m.Status.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftLogIndex", wireType) + } + m.RaftLogIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftLogIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + m.Shard = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Shard |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TenantId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TenantId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactionLevel", wireType) + } + m.CompactionLevel = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompactionLevel |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompactionOptions) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompactionOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompactionOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StatusUpdateIntervalSeconds", wireType) + } + m.StatusUpdateIntervalSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StatusUpdateIntervalSeconds |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompactionJobStatus) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompactionJobStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompactionJobStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JobName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= CompactionStatus(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
CompletedJob", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CompletedJob == nil { + m.CompletedJob = &CompletedJob{} + } + if err := m.CompletedJob.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftLogIndex", wireType) + } + m.RaftLogIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftLogIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + m.Shard = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Shard |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TenantId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TenantId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompletedJob) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompletedJob: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompletedJob: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Blocks = append(m.Blocks, &v1.BlockMeta{}) + if unmarshal, ok := interface{}(m.Blocks[len(m.Blocks)-1]).(interface { + UnmarshalVT([]byte) error + }); ok { + if err := unmarshal.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + if err := proto.Unmarshal(dAtA[iNdEx:postIndex], m.Blocks[len(m.Blocks)-1]); err != nil { + return err + } + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/api/gen/proto/go/compactor/v1/compactorv1connect/compactor.connect.go b/api/gen/proto/go/compactor/v1/compactorv1connect/compactor.connect.go new file mode 100644 index 0000000000..fb405e9d27 --- /dev/null +++ b/api/gen/proto/go/compactor/v1/compactorv1connect/compactor.connect.go @@ -0,0 +1,147 @@ +// Code generated by protoc-gen-connect-go. DO NOT EDIT. +// +// Source: compactor/v1/compactor.proto + +package compactorv1connect + +import ( + connect "connectrpc.com/connect" + context "context" + errors "errors" + v1 "github.com/grafana/pyroscope/api/gen/proto/go/compactor/v1" + http "net/http" + strings "strings" +) + +// This is a compile-time assertion to ensure that this generated file and the connect package are +// compatible. If you get a compiler error that this constant is not defined, this code was +// generated with a version of connect newer than the one compiled into your binary. You can fix the +// problem by either regenerating this code with an older version of connect or updating the connect +// version compiled into your binary. +const _ = connect.IsAtLeastVersion1_13_0 + +const ( + // CompactionPlannerName is the fully-qualified name of the CompactionPlanner service. + CompactionPlannerName = "compactor.v1.CompactionPlanner" +) + +// These constants are the fully-qualified names of the RPCs defined in this package. 
They're +// exposed at runtime as Spec.Procedure and as the final two segments of the HTTP route. +// +// Note that these are different from the fully-qualified method names used by +// google.golang.org/protobuf/reflect/protoreflect. To convert from these constants to +// reflection-formatted method names, remove the leading slash and convert the remaining slash to a +// period. +const ( + // CompactionPlannerPollCompactionJobsProcedure is the fully-qualified name of the + // CompactionPlanner's PollCompactionJobs RPC. + CompactionPlannerPollCompactionJobsProcedure = "/compactor.v1.CompactionPlanner/PollCompactionJobs" + // CompactionPlannerGetCompactionJobsProcedure is the fully-qualified name of the + // CompactionPlanner's GetCompactionJobs RPC. + CompactionPlannerGetCompactionJobsProcedure = "/compactor.v1.CompactionPlanner/GetCompactionJobs" +) + +// These variables are the protoreflect.Descriptor objects for the RPCs defined in this package. +var ( + compactionPlannerServiceDescriptor = v1.File_compactor_v1_compactor_proto.Services().ByName("CompactionPlanner") + compactionPlannerPollCompactionJobsMethodDescriptor = compactionPlannerServiceDescriptor.Methods().ByName("PollCompactionJobs") + compactionPlannerGetCompactionJobsMethodDescriptor = compactionPlannerServiceDescriptor.Methods().ByName("GetCompactionJobs") +) + +// CompactionPlannerClient is a client for the compactor.v1.CompactionPlanner service. +type CompactionPlannerClient interface { + // Used to both retrieve jobs and update the jobs status at the same time. + PollCompactionJobs(context.Context, *connect.Request[v1.PollCompactionJobsRequest]) (*connect.Response[v1.PollCompactionJobsResponse], error) + // Used for admin purposes only. + GetCompactionJobs(context.Context, *connect.Request[v1.GetCompactionRequest]) (*connect.Response[v1.GetCompactionResponse], error) +} + +// NewCompactionPlannerClient constructs a client for the compactor.v1.CompactionPlanner service. By +// default, it uses the Connect protocol with the binary Protobuf Codec, asks for gzipped responses, +// and sends uncompressed requests. To use the gRPC or gRPC-Web protocols, supply the +// connect.WithGRPC() or connect.WithGRPCWeb() options. +// +// The URL supplied here should be the base URL for the Connect or gRPC server (for example, +// http://api.acme.com or https://acme.com/grpc). +func NewCompactionPlannerClient(httpClient connect.HTTPClient, baseURL string, opts ...connect.ClientOption) CompactionPlannerClient { + baseURL = strings.TrimRight(baseURL, "/") + return &compactionPlannerClient{ + pollCompactionJobs: connect.NewClient[v1.PollCompactionJobsRequest, v1.PollCompactionJobsResponse]( + httpClient, + baseURL+CompactionPlannerPollCompactionJobsProcedure, + connect.WithSchema(compactionPlannerPollCompactionJobsMethodDescriptor), + connect.WithClientOptions(opts...), + ), + getCompactionJobs: connect.NewClient[v1.GetCompactionRequest, v1.GetCompactionResponse]( + httpClient, + baseURL+CompactionPlannerGetCompactionJobsProcedure, + connect.WithSchema(compactionPlannerGetCompactionJobsMethodDescriptor), + connect.WithClientOptions(opts...), + ), + } +} + +// compactionPlannerClient implements CompactionPlannerClient. +type compactionPlannerClient struct { + pollCompactionJobs *connect.Client[v1.PollCompactionJobsRequest, v1.PollCompactionJobsResponse] + getCompactionJobs *connect.Client[v1.GetCompactionRequest, v1.GetCompactionResponse] +} + +// PollCompactionJobs calls compactor.v1.CompactionPlanner.PollCompactionJobs. 
+func (c *compactionPlannerClient) PollCompactionJobs(ctx context.Context, req *connect.Request[v1.PollCompactionJobsRequest]) (*connect.Response[v1.PollCompactionJobsResponse], error) { + return c.pollCompactionJobs.CallUnary(ctx, req) +} + +// GetCompactionJobs calls compactor.v1.CompactionPlanner.GetCompactionJobs. +func (c *compactionPlannerClient) GetCompactionJobs(ctx context.Context, req *connect.Request[v1.GetCompactionRequest]) (*connect.Response[v1.GetCompactionResponse], error) { + return c.getCompactionJobs.CallUnary(ctx, req) +} + +// CompactionPlannerHandler is an implementation of the compactor.v1.CompactionPlanner service. +type CompactionPlannerHandler interface { + // Used to both retrieve jobs and update the jobs status at the same time. + PollCompactionJobs(context.Context, *connect.Request[v1.PollCompactionJobsRequest]) (*connect.Response[v1.PollCompactionJobsResponse], error) + // Used for admin purposes only. + GetCompactionJobs(context.Context, *connect.Request[v1.GetCompactionRequest]) (*connect.Response[v1.GetCompactionResponse], error) +} + +// NewCompactionPlannerHandler builds an HTTP handler from the service implementation. It returns +// the path on which to mount the handler and the handler itself. +// +// By default, handlers support the Connect, gRPC, and gRPC-Web protocols with the binary Protobuf +// and JSON codecs. They also support gzip compression. +func NewCompactionPlannerHandler(svc CompactionPlannerHandler, opts ...connect.HandlerOption) (string, http.Handler) { + compactionPlannerPollCompactionJobsHandler := connect.NewUnaryHandler( + CompactionPlannerPollCompactionJobsProcedure, + svc.PollCompactionJobs, + connect.WithSchema(compactionPlannerPollCompactionJobsMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + compactionPlannerGetCompactionJobsHandler := connect.NewUnaryHandler( + CompactionPlannerGetCompactionJobsProcedure, + svc.GetCompactionJobs, + connect.WithSchema(compactionPlannerGetCompactionJobsMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + return "/compactor.v1.CompactionPlanner/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case CompactionPlannerPollCompactionJobsProcedure: + compactionPlannerPollCompactionJobsHandler.ServeHTTP(w, r) + case CompactionPlannerGetCompactionJobsProcedure: + compactionPlannerGetCompactionJobsHandler.ServeHTTP(w, r) + default: + http.NotFound(w, r) + } + }) +} + +// UnimplementedCompactionPlannerHandler returns CodeUnimplemented from all methods. 
+type UnimplementedCompactionPlannerHandler struct{} + +func (UnimplementedCompactionPlannerHandler) PollCompactionJobs(context.Context, *connect.Request[v1.PollCompactionJobsRequest]) (*connect.Response[v1.PollCompactionJobsResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("compactor.v1.CompactionPlanner.PollCompactionJobs is not implemented")) +} + +func (UnimplementedCompactionPlannerHandler) GetCompactionJobs(context.Context, *connect.Request[v1.GetCompactionRequest]) (*connect.Response[v1.GetCompactionResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("compactor.v1.CompactionPlanner.GetCompactionJobs is not implemented")) +} diff --git a/api/gen/proto/go/compactor/v1/compactorv1connect/compactor.connect.mux.go b/api/gen/proto/go/compactor/v1/compactorv1connect/compactor.connect.mux.go new file mode 100644 index 0000000000..cd93b1a6f3 --- /dev/null +++ b/api/gen/proto/go/compactor/v1/compactorv1connect/compactor.connect.mux.go @@ -0,0 +1,32 @@ +// Code generated by protoc-gen-connect-go-mux. DO NOT EDIT. +// +// Source: compactor/v1/compactor.proto + +package compactorv1connect + +import ( + connect "connectrpc.com/connect" + mux "github.com/gorilla/mux" +) + +// This is a compile-time assertion to ensure that this generated file and the connect package are +// compatible. If you get a compiler error that this constant is not defined, this code was +// generated with a version of connect newer than the one compiled into your binary. You can fix the +// problem by either regenerating this code with an older version of connect or updating the connect +// version compiled into your binary. +const _ = connect.IsAtLeastVersion0_1_0 + +// RegisterCompactionPlannerHandler register an HTTP handler to a mux.Router from the service +// implementation. +func RegisterCompactionPlannerHandler(mux *mux.Router, svc CompactionPlannerHandler, opts ...connect.HandlerOption) { + mux.Handle("/compactor.v1.CompactionPlanner/PollCompactionJobs", connect.NewUnaryHandler( + "/compactor.v1.CompactionPlanner/PollCompactionJobs", + svc.PollCompactionJobs, + opts..., + )) + mux.Handle("/compactor.v1.CompactionPlanner/GetCompactionJobs", connect.NewUnaryHandler( + "/compactor.v1.CompactionPlanner/GetCompactionJobs", + svc.GetCompactionJobs, + opts..., + )) +} diff --git a/api/gen/proto/go/metastore/v1/metastore.pb.go b/api/gen/proto/go/metastore/v1/metastore.pb.go new file mode 100644 index 0000000000..90dbbf42d0 --- /dev/null +++ b/api/gen/proto/go/metastore/v1/metastore.pb.go @@ -0,0 +1,799 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc (unknown) +// source: metastore/v1/metastore.proto + +package metastorev1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type AddBlockRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Block *BlockMeta `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` +} + +func (x *AddBlockRequest) Reset() { + *x = AddBlockRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_metastore_v1_metastore_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AddBlockRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AddBlockRequest) ProtoMessage() {} + +func (x *AddBlockRequest) ProtoReflect() protoreflect.Message { + mi := &file_metastore_v1_metastore_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AddBlockRequest.ProtoReflect.Descriptor instead. +func (*AddBlockRequest) Descriptor() ([]byte, []int) { + return file_metastore_v1_metastore_proto_rawDescGZIP(), []int{0} +} + +func (x *AddBlockRequest) GetBlock() *BlockMeta { + if x != nil { + return x.Block + } + return nil +} + +type AddBlockResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *AddBlockResponse) Reset() { + *x = AddBlockResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_metastore_v1_metastore_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AddBlockResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AddBlockResponse) ProtoMessage() {} + +func (x *AddBlockResponse) ProtoReflect() protoreflect.Message { + mi := &file_metastore_v1_metastore_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AddBlockResponse.ProtoReflect.Descriptor instead. +func (*AddBlockResponse) Descriptor() ([]byte, []int) { + return file_metastore_v1_metastore_proto_rawDescGZIP(), []int{1} +} + +type BlockMeta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FormatVersion uint64 `protobuf:"varint,1,opt,name=format_version,json=formatVersion,proto3" json:"format_version,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + MinTime int64 `protobuf:"varint,3,opt,name=min_time,json=minTime,proto3" json:"min_time,omitempty"` + MaxTime int64 `protobuf:"varint,4,opt,name=max_time,json=maxTime,proto3" json:"max_time,omitempty"` + Shard uint32 `protobuf:"varint,5,opt,name=shard,proto3" json:"shard,omitempty"` + CompactionLevel uint32 `protobuf:"varint,6,opt,name=compaction_level,json=compactionLevel,proto3" json:"compaction_level,omitempty"` + // Optional. Empty if compaction level is 0. + TenantId string `protobuf:"bytes,7,opt,name=tenant_id,json=tenantId,proto3" json:"tenant_id,omitempty"` + // TODO(kolesnikovae): Partitions with labels? 
+ TenantServices []*TenantService `protobuf:"bytes,8,rep,name=tenant_services,json=tenantServices,proto3" json:"tenant_services,omitempty"` + Size uint64 `protobuf:"varint,9,opt,name=size,proto3" json:"size,omitempty"` +} + +func (x *BlockMeta) Reset() { + *x = BlockMeta{} + if protoimpl.UnsafeEnabled { + mi := &file_metastore_v1_metastore_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlockMeta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlockMeta) ProtoMessage() {} + +func (x *BlockMeta) ProtoReflect() protoreflect.Message { + mi := &file_metastore_v1_metastore_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlockMeta.ProtoReflect.Descriptor instead. +func (*BlockMeta) Descriptor() ([]byte, []int) { + return file_metastore_v1_metastore_proto_rawDescGZIP(), []int{2} +} + +func (x *BlockMeta) GetFormatVersion() uint64 { + if x != nil { + return x.FormatVersion + } + return 0 +} + +func (x *BlockMeta) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *BlockMeta) GetMinTime() int64 { + if x != nil { + return x.MinTime + } + return 0 +} + +func (x *BlockMeta) GetMaxTime() int64 { + if x != nil { + return x.MaxTime + } + return 0 +} + +func (x *BlockMeta) GetShard() uint32 { + if x != nil { + return x.Shard + } + return 0 +} + +func (x *BlockMeta) GetCompactionLevel() uint32 { + if x != nil { + return x.CompactionLevel + } + return 0 +} + +func (x *BlockMeta) GetTenantId() string { + if x != nil { + return x.TenantId + } + return "" +} + +func (x *BlockMeta) GetTenantServices() []*TenantService { + if x != nil { + return x.TenantServices + } + return nil +} + +func (x *BlockMeta) GetSize() uint64 { + if x != nil { + return x.Size + } + return 0 +} + +// TenantService object points to the offset in the block at which +// the tenant service data is located. +type TenantService struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TenantId string `protobuf:"bytes,1,opt,name=tenant_id,json=tenantId,proto3" json:"tenant_id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + MinTime int64 `protobuf:"varint,3,opt,name=min_time,json=minTime,proto3" json:"min_time,omitempty"` + MaxTime int64 `protobuf:"varint,4,opt,name=max_time,json=maxTime,proto3" json:"max_time,omitempty"` + // Table of contents lists data sections within the tenant + // service region. The offsets are absolute. + // + // The interpretation of the table of contents is specific + // to the metadata format version. By default, the sections are: + // - 0: profiles.parquet + // - 1: index.tsdb + // - 2: symbols.symdb + TableOfContents []uint64 `protobuf:"varint,5,rep,packed,name=table_of_contents,json=tableOfContents,proto3" json:"table_of_contents,omitempty"` + // Size of the section in bytes. + Size uint64 `protobuf:"varint,6,opt,name=size,proto3" json:"size,omitempty"` + // Profile types present in the tenant service data. 
+ ProfileTypes []string `protobuf:"bytes,7,rep,name=profile_types,json=profileTypes,proto3" json:"profile_types,omitempty"` +} + +func (x *TenantService) Reset() { + *x = TenantService{} + if protoimpl.UnsafeEnabled { + mi := &file_metastore_v1_metastore_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TenantService) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TenantService) ProtoMessage() {} + +func (x *TenantService) ProtoReflect() protoreflect.Message { + mi := &file_metastore_v1_metastore_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TenantService.ProtoReflect.Descriptor instead. +func (*TenantService) Descriptor() ([]byte, []int) { + return file_metastore_v1_metastore_proto_rawDescGZIP(), []int{3} +} + +func (x *TenantService) GetTenantId() string { + if x != nil { + return x.TenantId + } + return "" +} + +func (x *TenantService) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *TenantService) GetMinTime() int64 { + if x != nil { + return x.MinTime + } + return 0 +} + +func (x *TenantService) GetMaxTime() int64 { + if x != nil { + return x.MaxTime + } + return 0 +} + +func (x *TenantService) GetTableOfContents() []uint64 { + if x != nil { + return x.TableOfContents + } + return nil +} + +func (x *TenantService) GetSize() uint64 { + if x != nil { + return x.Size + } + return 0 +} + +func (x *TenantService) GetProfileTypes() []string { + if x != nil { + return x.ProfileTypes + } + return nil +} + +type ListBlocksForQueryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TenantId []string `protobuf:"bytes,1,rep,name=tenant_id,json=tenantId,proto3" json:"tenant_id,omitempty"` + StartTime int64 `protobuf:"varint,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + EndTime int64 `protobuf:"varint,3,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + Query string `protobuf:"bytes,4,opt,name=query,proto3" json:"query,omitempty"` +} + +func (x *ListBlocksForQueryRequest) Reset() { + *x = ListBlocksForQueryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_metastore_v1_metastore_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListBlocksForQueryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListBlocksForQueryRequest) ProtoMessage() {} + +func (x *ListBlocksForQueryRequest) ProtoReflect() protoreflect.Message { + mi := &file_metastore_v1_metastore_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListBlocksForQueryRequest.ProtoReflect.Descriptor instead. 
+func (*ListBlocksForQueryRequest) Descriptor() ([]byte, []int) { + return file_metastore_v1_metastore_proto_rawDescGZIP(), []int{4} +} + +func (x *ListBlocksForQueryRequest) GetTenantId() []string { + if x != nil { + return x.TenantId + } + return nil +} + +func (x *ListBlocksForQueryRequest) GetStartTime() int64 { + if x != nil { + return x.StartTime + } + return 0 +} + +func (x *ListBlocksForQueryRequest) GetEndTime() int64 { + if x != nil { + return x.EndTime + } + return 0 +} + +func (x *ListBlocksForQueryRequest) GetQuery() string { + if x != nil { + return x.Query + } + return "" +} + +type ListBlocksForQueryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Blocks []*BlockMeta `protobuf:"bytes,1,rep,name=blocks,proto3" json:"blocks,omitempty"` +} + +func (x *ListBlocksForQueryResponse) Reset() { + *x = ListBlocksForQueryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_metastore_v1_metastore_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListBlocksForQueryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListBlocksForQueryResponse) ProtoMessage() {} + +func (x *ListBlocksForQueryResponse) ProtoReflect() protoreflect.Message { + mi := &file_metastore_v1_metastore_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListBlocksForQueryResponse.ProtoReflect.Descriptor instead. +func (*ListBlocksForQueryResponse) Descriptor() ([]byte, []int) { + return file_metastore_v1_metastore_proto_rawDescGZIP(), []int{5} +} + +func (x *ListBlocksForQueryResponse) GetBlocks() []*BlockMeta { + if x != nil { + return x.Blocks + } + return nil +} + +type ReadIndexRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DebugRequestId string `protobuf:"bytes,1,opt,name=debug_request_id,json=debugRequestId,proto3" json:"debug_request_id,omitempty"` // for debug logging, // todo delete +} + +func (x *ReadIndexRequest) Reset() { + *x = ReadIndexRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_metastore_v1_metastore_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadIndexRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadIndexRequest) ProtoMessage() {} + +func (x *ReadIndexRequest) ProtoReflect() protoreflect.Message { + mi := &file_metastore_v1_metastore_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadIndexRequest.ProtoReflect.Descriptor instead. 
+func (*ReadIndexRequest) Descriptor() ([]byte, []int) { + return file_metastore_v1_metastore_proto_rawDescGZIP(), []int{6} +} + +func (x *ReadIndexRequest) GetDebugRequestId() string { + if x != nil { + return x.DebugRequestId + } + return "" +} + +type ReadIndexResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ReadIndex uint64 `protobuf:"varint,1,opt,name=read_index,json=readIndex,proto3" json:"read_index,omitempty"` +} + +func (x *ReadIndexResponse) Reset() { + *x = ReadIndexResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_metastore_v1_metastore_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadIndexResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadIndexResponse) ProtoMessage() {} + +func (x *ReadIndexResponse) ProtoReflect() protoreflect.Message { + mi := &file_metastore_v1_metastore_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadIndexResponse.ProtoReflect.Descriptor instead. +func (*ReadIndexResponse) Descriptor() ([]byte, []int) { + return file_metastore_v1_metastore_proto_rawDescGZIP(), []int{7} +} + +func (x *ReadIndexResponse) GetReadIndex() uint64 { + if x != nil { + return x.ReadIndex + } + return 0 +} + +var File_metastore_v1_metastore_proto protoreflect.FileDescriptor + +var file_metastore_v1_metastore_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6d, + 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, + 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x22, 0x40, 0x0a, 0x0f, + 0x41, 0x64, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x2d, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x12, + 0x0a, 0x10, 0x41, 0x64, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0xb0, 0x02, 0x0a, 0x09, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, + 0x12, 0x25, 0x0a, 0x0e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x69, 0x6e, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x63, + 0x6f, 0x6d, 0x70, 0x61, 0x63, 
0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x1b, + 0x0a, 0x09, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x44, 0x0a, 0x0f, 0x74, + 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x08, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x52, 0x0e, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x04, 0x73, 0x69, 0x7a, 0x65, 0x22, 0xdb, 0x01, 0x0a, 0x0d, 0x54, 0x65, 0x6e, 0x61, 0x6e, 0x74, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x65, 0x6e, 0x61, 0x6e, + 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x65, 0x6e, 0x61, + 0x6e, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x69, 0x6e, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x54, + 0x69, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2a, + 0x0a, 0x11, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x04, 0x52, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x4f, 0x66, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x23, + 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, + 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x73, 0x22, 0x88, 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x73, 0x46, 0x6f, 0x72, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1d, + 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x19, 0x0a, + 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x4d, + 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x46, 0x6f, 0x72, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, + 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, + 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x22, 0x3c, 0x0a, + 0x10, 0x52, 0x65, 0x61, 0x64, 0x49, 0x6e, 0x64, 0x65, 
0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x28, 0x0a, 0x10, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x64, 0x65, 0x62, + 0x75, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x22, 0x32, 0x0a, 0x11, 0x52, + 0x65, 0x61, 0x64, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x32, + 0x9a, 0x02, 0x0a, 0x10, 0x4d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0x4b, 0x0a, 0x08, 0x41, 0x64, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x12, 0x1d, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x41, 0x64, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1e, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, + 0x64, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x69, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x46, + 0x6f, 0x72, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x27, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x73, 0x46, 0x6f, 0x72, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x28, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x46, 0x6f, 0x72, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x09, + 0x52, 0x65, 0x61, 0x64, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1e, 0x2e, 0x6d, 0x65, 0x74, 0x61, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x49, 0x6e, 0x64, + 0x65, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x65, 0x74, 0x61, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x49, 0x6e, 0x64, + 0x65, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0xbb, 0x01, 0x0a, + 0x10, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x31, 0x42, 0x0e, 0x4d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x67, 0x72, 0x61, 0x66, 0x61, 0x6e, 0x61, 0x2f, 0x70, 0x79, 0x72, 0x6f, 0x73, 0x63, 0x6f, 0x70, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x67, 0x6f, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x3b, + 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x4d, 0x58, + 0x58, 0xaa, 0x02, 0x0c, 0x4d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x56, 0x31, + 0xca, 0x02, 0x0c, 0x4d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5c, 0x56, 0x31, 0xe2, + 0x02, 0x18, 0x4d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5c, 0x56, 0x31, 0x5c, 0x47, + 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0d, 0x4d, 0x65, 0x74, + 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 
0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_metastore_v1_metastore_proto_rawDescOnce sync.Once + file_metastore_v1_metastore_proto_rawDescData = file_metastore_v1_metastore_proto_rawDesc +) + +func file_metastore_v1_metastore_proto_rawDescGZIP() []byte { + file_metastore_v1_metastore_proto_rawDescOnce.Do(func() { + file_metastore_v1_metastore_proto_rawDescData = protoimpl.X.CompressGZIP(file_metastore_v1_metastore_proto_rawDescData) + }) + return file_metastore_v1_metastore_proto_rawDescData +} + +var file_metastore_v1_metastore_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_metastore_v1_metastore_proto_goTypes = []any{ + (*AddBlockRequest)(nil), // 0: metastore.v1.AddBlockRequest + (*AddBlockResponse)(nil), // 1: metastore.v1.AddBlockResponse + (*BlockMeta)(nil), // 2: metastore.v1.BlockMeta + (*TenantService)(nil), // 3: metastore.v1.TenantService + (*ListBlocksForQueryRequest)(nil), // 4: metastore.v1.ListBlocksForQueryRequest + (*ListBlocksForQueryResponse)(nil), // 5: metastore.v1.ListBlocksForQueryResponse + (*ReadIndexRequest)(nil), // 6: metastore.v1.ReadIndexRequest + (*ReadIndexResponse)(nil), // 7: metastore.v1.ReadIndexResponse +} +var file_metastore_v1_metastore_proto_depIdxs = []int32{ + 2, // 0: metastore.v1.AddBlockRequest.block:type_name -> metastore.v1.BlockMeta + 3, // 1: metastore.v1.BlockMeta.tenant_services:type_name -> metastore.v1.TenantService + 2, // 2: metastore.v1.ListBlocksForQueryResponse.blocks:type_name -> metastore.v1.BlockMeta + 0, // 3: metastore.v1.MetastoreService.AddBlock:input_type -> metastore.v1.AddBlockRequest + 4, // 4: metastore.v1.MetastoreService.ListBlocksForQuery:input_type -> metastore.v1.ListBlocksForQueryRequest + 6, // 5: metastore.v1.MetastoreService.ReadIndex:input_type -> metastore.v1.ReadIndexRequest + 1, // 6: metastore.v1.MetastoreService.AddBlock:output_type -> metastore.v1.AddBlockResponse + 5, // 7: metastore.v1.MetastoreService.ListBlocksForQuery:output_type -> metastore.v1.ListBlocksForQueryResponse + 7, // 8: metastore.v1.MetastoreService.ReadIndex:output_type -> metastore.v1.ReadIndexResponse + 6, // [6:9] is the sub-list for method output_type + 3, // [3:6] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_metastore_v1_metastore_proto_init() } +func file_metastore_v1_metastore_proto_init() { + if File_metastore_v1_metastore_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_metastore_v1_metastore_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*AddBlockRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_metastore_v1_metastore_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*AddBlockResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_metastore_v1_metastore_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*BlockMeta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_metastore_v1_metastore_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*TenantService); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_metastore_v1_metastore_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*ListBlocksForQueryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_metastore_v1_metastore_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*ListBlocksForQueryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_metastore_v1_metastore_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*ReadIndexRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_metastore_v1_metastore_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*ReadIndexResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_metastore_v1_metastore_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_metastore_v1_metastore_proto_goTypes, + DependencyIndexes: file_metastore_v1_metastore_proto_depIdxs, + MessageInfos: file_metastore_v1_metastore_proto_msgTypes, + }.Build() + File_metastore_v1_metastore_proto = out.File + file_metastore_v1_metastore_proto_rawDesc = nil + file_metastore_v1_metastore_proto_goTypes = nil + file_metastore_v1_metastore_proto_depIdxs = nil +} diff --git a/api/gen/proto/go/metastore/v1/metastore_vtproto.pb.go b/api/gen/proto/go/metastore/v1/metastore_vtproto.pb.go new file mode 100644 index 0000000000..809156debf --- /dev/null +++ b/api/gen/proto/go/metastore/v1/metastore_vtproto.pb.go @@ -0,0 +1,2296 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.6.0 +// source: metastore/v1/metastore.proto + +package metastorev1 + +import ( + context "context" + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *AddBlockRequest) CloneVT() *AddBlockRequest { + if m == nil { + return (*AddBlockRequest)(nil) + } + r := new(AddBlockRequest) + r.Block = m.Block.CloneVT() + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *AddBlockRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *AddBlockResponse) CloneVT() *AddBlockResponse { + if m == nil { + return (*AddBlockResponse)(nil) + } + r := new(AddBlockResponse) + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *AddBlockResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BlockMeta) CloneVT() *BlockMeta { + if m == nil { + return (*BlockMeta)(nil) + } + r := new(BlockMeta) + r.FormatVersion = m.FormatVersion + r.Id = m.Id + r.MinTime = m.MinTime + r.MaxTime = m.MaxTime + r.Shard = m.Shard + r.CompactionLevel = m.CompactionLevel + r.TenantId = m.TenantId + r.Size = m.Size + if rhs := m.TenantServices; rhs != nil { + tmpContainer := make([]*TenantService, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.TenantServices = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BlockMeta) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TenantService) CloneVT() *TenantService { + if m == nil { + return (*TenantService)(nil) + } + r := new(TenantService) + r.TenantId = m.TenantId + r.Name = m.Name + r.MinTime = m.MinTime + r.MaxTime = m.MaxTime + r.Size = m.Size + if rhs := m.TableOfContents; rhs != nil { + tmpContainer := make([]uint64, len(rhs)) + copy(tmpContainer, rhs) + r.TableOfContents = tmpContainer + } + if rhs := m.ProfileTypes; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ProfileTypes = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *TenantService) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ListBlocksForQueryRequest) CloneVT() *ListBlocksForQueryRequest { + if m == nil { + return (*ListBlocksForQueryRequest)(nil) + } + r := new(ListBlocksForQueryRequest) + r.StartTime = m.StartTime + r.EndTime = m.EndTime + r.Query = m.Query + if rhs := m.TenantId; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.TenantId = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ListBlocksForQueryRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ListBlocksForQueryResponse) CloneVT() *ListBlocksForQueryResponse { + if m == nil { + return (*ListBlocksForQueryResponse)(nil) + } + r := new(ListBlocksForQueryResponse) + if rhs := m.Blocks; rhs != nil { + tmpContainer := make([]*BlockMeta, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Blocks = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ListBlocksForQueryResponse) CloneMessageVT() proto.Message { + 
return m.CloneVT() +} + +func (m *ReadIndexRequest) CloneVT() *ReadIndexRequest { + if m == nil { + return (*ReadIndexRequest)(nil) + } + r := new(ReadIndexRequest) + r.DebugRequestId = m.DebugRequestId + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReadIndexRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReadIndexResponse) CloneVT() *ReadIndexResponse { + if m == nil { + return (*ReadIndexResponse)(nil) + } + r := new(ReadIndexResponse) + r.ReadIndex = m.ReadIndex + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReadIndexResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (this *AddBlockRequest) EqualVT(that *AddBlockRequest) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if !this.Block.EqualVT(that.Block) { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *AddBlockRequest) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*AddBlockRequest) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *AddBlockResponse) EqualVT(that *AddBlockResponse) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *AddBlockResponse) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*AddBlockResponse) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *BlockMeta) EqualVT(that *BlockMeta) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.FormatVersion != that.FormatVersion { + return false + } + if this.Id != that.Id { + return false + } + if this.MinTime != that.MinTime { + return false + } + if this.MaxTime != that.MaxTime { + return false + } + if this.Shard != that.Shard { + return false + } + if this.CompactionLevel != that.CompactionLevel { + return false + } + if this.TenantId != that.TenantId { + return false + } + if len(this.TenantServices) != len(that.TenantServices) { + return false + } + for i, vx := range this.TenantServices { + vy := that.TenantServices[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &TenantService{} + } + if q == nil { + q = &TenantService{} + } + if !p.EqualVT(q) { + return false + } + } + } + if this.Size != that.Size { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *BlockMeta) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*BlockMeta) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *TenantService) EqualVT(that *TenantService) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.TenantId != that.TenantId { + return false + } + if this.Name != that.Name { + return false + } + if this.MinTime != that.MinTime { + return false + } + if this.MaxTime != that.MaxTime { + return false + } + if len(this.TableOfContents) != len(that.TableOfContents) { + return false + } + for i, vx := range this.TableOfContents { + vy := that.TableOfContents[i] + if vx != vy { + return false + } + } + if this.Size != that.Size { + return false + } + if len(this.ProfileTypes) != len(that.ProfileTypes) 
{ + return false + } + for i, vx := range this.ProfileTypes { + vy := that.ProfileTypes[i] + if vx != vy { + return false + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *TenantService) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*TenantService) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *ListBlocksForQueryRequest) EqualVT(that *ListBlocksForQueryRequest) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if len(this.TenantId) != len(that.TenantId) { + return false + } + for i, vx := range this.TenantId { + vy := that.TenantId[i] + if vx != vy { + return false + } + } + if this.StartTime != that.StartTime { + return false + } + if this.EndTime != that.EndTime { + return false + } + if this.Query != that.Query { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *ListBlocksForQueryRequest) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*ListBlocksForQueryRequest) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *ListBlocksForQueryResponse) EqualVT(that *ListBlocksForQueryResponse) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if len(this.Blocks) != len(that.Blocks) { + return false + } + for i, vx := range this.Blocks { + vy := that.Blocks[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &BlockMeta{} + } + if q == nil { + q = &BlockMeta{} + } + if !p.EqualVT(q) { + return false + } + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *ListBlocksForQueryResponse) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*ListBlocksForQueryResponse) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *ReadIndexRequest) EqualVT(that *ReadIndexRequest) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.DebugRequestId != that.DebugRequestId { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *ReadIndexRequest) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*ReadIndexRequest) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *ReadIndexResponse) EqualVT(that *ReadIndexResponse) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.ReadIndex != that.ReadIndex { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *ReadIndexResponse) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*ReadIndexResponse) + if !ok { + return false + } + return this.EqualVT(that) +} + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// MetastoreServiceClient is the client API for MetastoreService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type MetastoreServiceClient interface { + AddBlock(ctx context.Context, in *AddBlockRequest, opts ...grpc.CallOption) (*AddBlockResponse, error) + ListBlocksForQuery(ctx context.Context, in *ListBlocksForQueryRequest, opts ...grpc.CallOption) (*ListBlocksForQueryResponse, error) + ReadIndex(ctx context.Context, in *ReadIndexRequest, opts ...grpc.CallOption) (*ReadIndexResponse, error) +} + +type metastoreServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewMetastoreServiceClient(cc grpc.ClientConnInterface) MetastoreServiceClient { + return &metastoreServiceClient{cc} +} + +func (c *metastoreServiceClient) AddBlock(ctx context.Context, in *AddBlockRequest, opts ...grpc.CallOption) (*AddBlockResponse, error) { + out := new(AddBlockResponse) + err := c.cc.Invoke(ctx, "/metastore.v1.MetastoreService/AddBlock", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metastoreServiceClient) ListBlocksForQuery(ctx context.Context, in *ListBlocksForQueryRequest, opts ...grpc.CallOption) (*ListBlocksForQueryResponse, error) { + out := new(ListBlocksForQueryResponse) + err := c.cc.Invoke(ctx, "/metastore.v1.MetastoreService/ListBlocksForQuery", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metastoreServiceClient) ReadIndex(ctx context.Context, in *ReadIndexRequest, opts ...grpc.CallOption) (*ReadIndexResponse, error) { + out := new(ReadIndexResponse) + err := c.cc.Invoke(ctx, "/metastore.v1.MetastoreService/ReadIndex", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MetastoreServiceServer is the server API for MetastoreService service. +// All implementations must embed UnimplementedMetastoreServiceServer +// for forward compatibility +type MetastoreServiceServer interface { + AddBlock(context.Context, *AddBlockRequest) (*AddBlockResponse, error) + ListBlocksForQuery(context.Context, *ListBlocksForQueryRequest) (*ListBlocksForQueryResponse, error) + ReadIndex(context.Context, *ReadIndexRequest) (*ReadIndexResponse, error) + mustEmbedUnimplementedMetastoreServiceServer() +} + +// UnimplementedMetastoreServiceServer must be embedded to have forward compatible implementations. +type UnimplementedMetastoreServiceServer struct { +} + +func (UnimplementedMetastoreServiceServer) AddBlock(context.Context, *AddBlockRequest) (*AddBlockResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AddBlock not implemented") +} +func (UnimplementedMetastoreServiceServer) ListBlocksForQuery(context.Context, *ListBlocksForQueryRequest) (*ListBlocksForQueryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListBlocksForQuery not implemented") +} +func (UnimplementedMetastoreServiceServer) ReadIndex(context.Context, *ReadIndexRequest) (*ReadIndexResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadIndex not implemented") +} +func (UnimplementedMetastoreServiceServer) mustEmbedUnimplementedMetastoreServiceServer() {} + +// UnsafeMetastoreServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to MetastoreServiceServer will +// result in compilation errors. 
+type UnsafeMetastoreServiceServer interface { + mustEmbedUnimplementedMetastoreServiceServer() +} + +func RegisterMetastoreServiceServer(s grpc.ServiceRegistrar, srv MetastoreServiceServer) { + s.RegisterService(&MetastoreService_ServiceDesc, srv) +} + +func _MetastoreService_AddBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddBlockRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetastoreServiceServer).AddBlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/metastore.v1.MetastoreService/AddBlock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetastoreServiceServer).AddBlock(ctx, req.(*AddBlockRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetastoreService_ListBlocksForQuery_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListBlocksForQueryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetastoreServiceServer).ListBlocksForQuery(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/metastore.v1.MetastoreService/ListBlocksForQuery", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetastoreServiceServer).ListBlocksForQuery(ctx, req.(*ListBlocksForQueryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetastoreService_ReadIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadIndexRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetastoreServiceServer).ReadIndex(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/metastore.v1.MetastoreService/ReadIndex", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetastoreServiceServer).ReadIndex(ctx, req.(*ReadIndexRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// MetastoreService_ServiceDesc is the grpc.ServiceDesc for MetastoreService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var MetastoreService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "metastore.v1.MetastoreService", + HandlerType: (*MetastoreServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AddBlock", + Handler: _MetastoreService_AddBlock_Handler, + }, + { + MethodName: "ListBlocksForQuery", + Handler: _MetastoreService_ListBlocksForQuery_Handler, + }, + { + MethodName: "ReadIndex", + Handler: _MetastoreService_ReadIndex_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "metastore/v1/metastore.proto", +} + +func (m *AddBlockRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AddBlockRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AddBlockRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Block != nil { + size, err := m.Block.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AddBlockResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AddBlockResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AddBlockResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *BlockMeta) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockMeta) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *BlockMeta) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Size != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Size)) + i-- + dAtA[i] = 0x48 + } + if len(m.TenantServices) > 0 { + for iNdEx := len(m.TenantServices) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TenantServices[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + } + if len(m.TenantId) > 0 { + i -= len(m.TenantId) + copy(dAtA[i:], m.TenantId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TenantId))) + i-- + dAtA[i] = 0x3a + } + if m.CompactionLevel != 0 { + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(m.CompactionLevel)) + i-- + dAtA[i] = 0x30 + } + if m.Shard != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Shard)) + i-- + dAtA[i] = 0x28 + } + if m.MaxTime != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MaxTime)) + i-- + dAtA[i] = 0x20 + } + if m.MinTime != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MinTime)) + i-- + dAtA[i] = 0x18 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0x12 + } + if m.FormatVersion != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.FormatVersion)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TenantService) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TenantService) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *TenantService) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ProfileTypes) > 0 { + for iNdEx := len(m.ProfileTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ProfileTypes[iNdEx]) + copy(dAtA[i:], m.ProfileTypes[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ProfileTypes[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if m.Size != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Size)) + i-- + dAtA[i] = 0x30 + } + if len(m.TableOfContents) > 0 { + var pksize2 int + for _, num := range m.TableOfContents { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num := range m.TableOfContents { + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x2a + } + if m.MaxTime != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MaxTime)) + i-- + dAtA[i] = 0x20 + } + if m.MinTime != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MinTime)) + i-- + dAtA[i] = 0x18 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.TenantId) > 0 { + i -= len(m.TenantId) + copy(dAtA[i:], m.TenantId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TenantId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListBlocksForQueryRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListBlocksForQueryRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ListBlocksForQueryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Query))) + i-- + dAtA[i] = 0x22 + } + if m.EndTime != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.EndTime)) + i-- + dAtA[i] = 0x18 + } + if m.StartTime != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.StartTime)) + i-- + dAtA[i] = 0x10 + } + if len(m.TenantId) > 0 { + for iNdEx := len(m.TenantId) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TenantId[iNdEx]) + copy(dAtA[i:], m.TenantId[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TenantId[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ListBlocksForQueryResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListBlocksForQueryResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ListBlocksForQueryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Blocks) > 0 { + for iNdEx := len(m.Blocks) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Blocks[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ReadIndexRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadIndexRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReadIndexRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DebugRequestId) > 0 { + i -= len(m.DebugRequestId) + copy(dAtA[i:], m.DebugRequestId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DebugRequestId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReadIndexResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadIndexResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReadIndexResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ReadIndex != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ReadIndex)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *AddBlockRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Block != nil { + l = m.Block.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return 
n +} + +func (m *AddBlockResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *BlockMeta) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FormatVersion != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.FormatVersion)) + } + l = len(m.Id) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MinTime != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MinTime)) + } + if m.MaxTime != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MaxTime)) + } + if m.Shard != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Shard)) + } + if m.CompactionLevel != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.CompactionLevel)) + } + l = len(m.TenantId) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.TenantServices) > 0 { + for _, e := range m.TenantServices { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Size != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Size)) + } + n += len(m.unknownFields) + return n +} + +func (m *TenantService) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TenantId) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.MinTime != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MinTime)) + } + if m.MaxTime != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MaxTime)) + } + if len(m.TableOfContents) > 0 { + l = 0 + for _, e := range m.TableOfContents { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if m.Size != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Size)) + } + if len(m.ProfileTypes) > 0 { + for _, s := range m.ProfileTypes { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ListBlocksForQueryRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.TenantId) > 0 { + for _, s := range m.TenantId { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.StartTime != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.StartTime)) + } + if m.EndTime != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.EndTime)) + } + l = len(m.Query) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ListBlocksForQueryResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Blocks) > 0 { + for _, e := range m.Blocks { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ReadIndexRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DebugRequestId) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReadIndexResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ReadIndex != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ReadIndex)) + } + n += len(m.unknownFields) + return n +} + +func (m *AddBlockRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= 
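
(Editorial sketch.) The MarshalVT/SizeVT/UnmarshalVT methods generated here follow the vtprotobuf pattern: SizeVT computes the exact buffer size and MarshalToSizedBufferVT fills it back to front. A minimal round trip, assuming the metastorev1 import alias used elsewhere in this patch:

package example

import metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1"

// roundTripBlockMeta encodes a BlockMeta with the generated vtprotobuf methods
// and decodes it into a fresh message; it only illustrates the API shape.
func roundTripBlockMeta(in *metastorev1.BlockMeta) (*metastorev1.BlockMeta, error) {
	buf, err := in.MarshalVT() // allocates SizeVT() bytes and encodes into them
	if err != nil {
		return nil, err
	}
	out := &metastorev1.BlockMeta{}
	if err := out.UnmarshalVT(buf); err != nil {
		return nil, err
	}
	return out, nil
}
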
l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddBlockRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddBlockRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Block == nil { + m.Block = &BlockMeta{} + } + if err := m.Block.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AddBlockResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddBlockResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddBlockResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockMeta) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FormatVersion", wireType) + } + m.FormatVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FormatVersion |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinTime", wireType) + } + m.MinTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTime", wireType) + } + m.MaxTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + m.Shard = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Shard |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactionLevel", wireType) + } + m.CompactionLevel = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompactionLevel |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TenantId", wireType) + } + var stringLen uint64 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TenantId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TenantServices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TenantServices = append(m.TenantServices, &TenantService{}) + if err := m.TenantServices[len(m.TenantServices)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size", wireType) + } + m.Size = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TenantService) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TenantService: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TenantService: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TenantId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TenantId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinTime", wireType) + } + m.MinTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTime", wireType) + } + m.MaxTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TableOfContents = append(m.TableOfContents, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 
0x80 { + break + } + } + if packedLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.TableOfContents) == 0 { + m.TableOfContents = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TableOfContents = append(m.TableOfContents, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field TableOfContents", wireType) + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size", wireType) + } + m.Size = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProfileTypes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProfileTypes = append(m.ProfileTypes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListBlocksForQueryRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListBlocksForQueryRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListBlocksForQueryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TenantId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TenantId = append(m.TenantId, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + m.StartTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTime", wireType) + } + m.EndTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Query = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListBlocksForQueryResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListBlocksForQueryResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListBlocksForQueryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Blocks = append(m.Blocks, &BlockMeta{}) + if err := m.Blocks[len(m.Blocks)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadIndexRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadIndexRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadIndexRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DebugRequestId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DebugRequestId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadIndexResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadIndexResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadIndexResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadIndex", wireType) + } + m.ReadIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/api/gen/proto/go/metastore/v1/metastorev1connect/metastore.connect.go b/api/gen/proto/go/metastore/v1/metastorev1connect/metastore.connect.go new file mode 100644 index 0000000000..8a55af7551 --- /dev/null +++ b/api/gen/proto/go/metastore/v1/metastorev1connect/metastore.connect.go @@ -0,0 +1,173 @@ +// Code generated by protoc-gen-connect-go. DO NOT EDIT. +// +// Source: metastore/v1/metastore.proto + +package metastorev1connect + +import ( + connect "connectrpc.com/connect" + context "context" + errors "errors" + v1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + http "net/http" + strings "strings" +) + +// This is a compile-time assertion to ensure that this generated file and the connect package are +// compatible. If you get a compiler error that this constant is not defined, this code was +// generated with a version of connect newer than the one compiled into your binary. You can fix the +// problem by either regenerating this code with an older version of connect or updating the connect +// version compiled into your binary. +const _ = connect.IsAtLeastVersion1_13_0 + +const ( + // MetastoreServiceName is the fully-qualified name of the MetastoreService service. + MetastoreServiceName = "metastore.v1.MetastoreService" +) + +// These constants are the fully-qualified names of the RPCs defined in this package. They're +// exposed at runtime as Spec.Procedure and as the final two segments of the HTTP route. +// +// Note that these are different from the fully-qualified method names used by +// google.golang.org/protobuf/reflect/protoreflect. To convert from these constants to +// reflection-formatted method names, remove the leading slash and convert the remaining slash to a +// period. +const ( + // MetastoreServiceAddBlockProcedure is the fully-qualified name of the MetastoreService's AddBlock + // RPC. + MetastoreServiceAddBlockProcedure = "/metastore.v1.MetastoreService/AddBlock" + // MetastoreServiceListBlocksForQueryProcedure is the fully-qualified name of the MetastoreService's + // ListBlocksForQuery RPC. + MetastoreServiceListBlocksForQueryProcedure = "/metastore.v1.MetastoreService/ListBlocksForQuery" + // MetastoreServiceReadIndexProcedure is the fully-qualified name of the MetastoreService's + // ReadIndex RPC. + MetastoreServiceReadIndexProcedure = "/metastore.v1.MetastoreService/ReadIndex" +) + +// These variables are the protoreflect.Descriptor objects for the RPCs defined in this package. +var ( + metastoreServiceServiceDescriptor = v1.File_metastore_v1_metastore_proto.Services().ByName("MetastoreService") + metastoreServiceAddBlockMethodDescriptor = metastoreServiceServiceDescriptor.Methods().ByName("AddBlock") + metastoreServiceListBlocksForQueryMethodDescriptor = metastoreServiceServiceDescriptor.Methods().ByName("ListBlocksForQuery") + metastoreServiceReadIndexMethodDescriptor = metastoreServiceServiceDescriptor.Methods().ByName("ReadIndex") +) + +// MetastoreServiceClient is a client for the metastore.v1.MetastoreService service. 
+type MetastoreServiceClient interface { + AddBlock(context.Context, *connect.Request[v1.AddBlockRequest]) (*connect.Response[v1.AddBlockResponse], error) + ListBlocksForQuery(context.Context, *connect.Request[v1.ListBlocksForQueryRequest]) (*connect.Response[v1.ListBlocksForQueryResponse], error) + ReadIndex(context.Context, *connect.Request[v1.ReadIndexRequest]) (*connect.Response[v1.ReadIndexResponse], error) +} + +// NewMetastoreServiceClient constructs a client for the metastore.v1.MetastoreService service. By +// default, it uses the Connect protocol with the binary Protobuf Codec, asks for gzipped responses, +// and sends uncompressed requests. To use the gRPC or gRPC-Web protocols, supply the +// connect.WithGRPC() or connect.WithGRPCWeb() options. +// +// The URL supplied here should be the base URL for the Connect or gRPC server (for example, +// http://api.acme.com or https://acme.com/grpc). +func NewMetastoreServiceClient(httpClient connect.HTTPClient, baseURL string, opts ...connect.ClientOption) MetastoreServiceClient { + baseURL = strings.TrimRight(baseURL, "/") + return &metastoreServiceClient{ + addBlock: connect.NewClient[v1.AddBlockRequest, v1.AddBlockResponse]( + httpClient, + baseURL+MetastoreServiceAddBlockProcedure, + connect.WithSchema(metastoreServiceAddBlockMethodDescriptor), + connect.WithClientOptions(opts...), + ), + listBlocksForQuery: connect.NewClient[v1.ListBlocksForQueryRequest, v1.ListBlocksForQueryResponse]( + httpClient, + baseURL+MetastoreServiceListBlocksForQueryProcedure, + connect.WithSchema(metastoreServiceListBlocksForQueryMethodDescriptor), + connect.WithClientOptions(opts...), + ), + readIndex: connect.NewClient[v1.ReadIndexRequest, v1.ReadIndexResponse]( + httpClient, + baseURL+MetastoreServiceReadIndexProcedure, + connect.WithSchema(metastoreServiceReadIndexMethodDescriptor), + connect.WithClientOptions(opts...), + ), + } +} + +// metastoreServiceClient implements MetastoreServiceClient. +type metastoreServiceClient struct { + addBlock *connect.Client[v1.AddBlockRequest, v1.AddBlockResponse] + listBlocksForQuery *connect.Client[v1.ListBlocksForQueryRequest, v1.ListBlocksForQueryResponse] + readIndex *connect.Client[v1.ReadIndexRequest, v1.ReadIndexResponse] +} + +// AddBlock calls metastore.v1.MetastoreService.AddBlock. +func (c *metastoreServiceClient) AddBlock(ctx context.Context, req *connect.Request[v1.AddBlockRequest]) (*connect.Response[v1.AddBlockResponse], error) { + return c.addBlock.CallUnary(ctx, req) +} + +// ListBlocksForQuery calls metastore.v1.MetastoreService.ListBlocksForQuery. +func (c *metastoreServiceClient) ListBlocksForQuery(ctx context.Context, req *connect.Request[v1.ListBlocksForQueryRequest]) (*connect.Response[v1.ListBlocksForQueryResponse], error) { + return c.listBlocksForQuery.CallUnary(ctx, req) +} + +// ReadIndex calls metastore.v1.MetastoreService.ReadIndex. +func (c *metastoreServiceClient) ReadIndex(ctx context.Context, req *connect.Request[v1.ReadIndexRequest]) (*connect.Response[v1.ReadIndexResponse], error) { + return c.readIndex.CallUnary(ctx, req) +} + +// MetastoreServiceHandler is an implementation of the metastore.v1.MetastoreService service. 
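
(Editorial sketch.) Constructing and calling the Connect client defined above; the base URL and the block metadata are placeholders, and connect.WithGRPC() is optional since the default is the Connect protocol with the binary Protobuf codec:

package example

import (
	"context"
	"net/http"

	connect "connectrpc.com/connect"

	metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1"
	"github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/metastorev1connect"
)

func addBlock(ctx context.Context, meta *metastorev1.BlockMeta) error {
	client := metastorev1connect.NewMetastoreServiceClient(
		http.DefaultClient,
		"http://localhost:4040", // illustrative base URL
		connect.WithGRPC(),      // optional: speak gRPC instead of the Connect protocol
	)
	_, err := client.AddBlock(ctx, connect.NewRequest(&metastorev1.AddBlockRequest{Block: meta}))
	return err
}
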
+type MetastoreServiceHandler interface { + AddBlock(context.Context, *connect.Request[v1.AddBlockRequest]) (*connect.Response[v1.AddBlockResponse], error) + ListBlocksForQuery(context.Context, *connect.Request[v1.ListBlocksForQueryRequest]) (*connect.Response[v1.ListBlocksForQueryResponse], error) + ReadIndex(context.Context, *connect.Request[v1.ReadIndexRequest]) (*connect.Response[v1.ReadIndexResponse], error) +} + +// NewMetastoreServiceHandler builds an HTTP handler from the service implementation. It returns the +// path on which to mount the handler and the handler itself. +// +// By default, handlers support the Connect, gRPC, and gRPC-Web protocols with the binary Protobuf +// and JSON codecs. They also support gzip compression. +func NewMetastoreServiceHandler(svc MetastoreServiceHandler, opts ...connect.HandlerOption) (string, http.Handler) { + metastoreServiceAddBlockHandler := connect.NewUnaryHandler( + MetastoreServiceAddBlockProcedure, + svc.AddBlock, + connect.WithSchema(metastoreServiceAddBlockMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + metastoreServiceListBlocksForQueryHandler := connect.NewUnaryHandler( + MetastoreServiceListBlocksForQueryProcedure, + svc.ListBlocksForQuery, + connect.WithSchema(metastoreServiceListBlocksForQueryMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + metastoreServiceReadIndexHandler := connect.NewUnaryHandler( + MetastoreServiceReadIndexProcedure, + svc.ReadIndex, + connect.WithSchema(metastoreServiceReadIndexMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + return "/metastore.v1.MetastoreService/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case MetastoreServiceAddBlockProcedure: + metastoreServiceAddBlockHandler.ServeHTTP(w, r) + case MetastoreServiceListBlocksForQueryProcedure: + metastoreServiceListBlocksForQueryHandler.ServeHTTP(w, r) + case MetastoreServiceReadIndexProcedure: + metastoreServiceReadIndexHandler.ServeHTTP(w, r) + default: + http.NotFound(w, r) + } + }) +} + +// UnimplementedMetastoreServiceHandler returns CodeUnimplemented from all methods. +type UnimplementedMetastoreServiceHandler struct{} + +func (UnimplementedMetastoreServiceHandler) AddBlock(context.Context, *connect.Request[v1.AddBlockRequest]) (*connect.Response[v1.AddBlockResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("metastore.v1.MetastoreService.AddBlock is not implemented")) +} + +func (UnimplementedMetastoreServiceHandler) ListBlocksForQuery(context.Context, *connect.Request[v1.ListBlocksForQueryRequest]) (*connect.Response[v1.ListBlocksForQueryResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("metastore.v1.MetastoreService.ListBlocksForQuery is not implemented")) +} + +func (UnimplementedMetastoreServiceHandler) ReadIndex(context.Context, *connect.Request[v1.ReadIndexRequest]) (*connect.Response[v1.ReadIndexResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("metastore.v1.MetastoreService.ReadIndex is not implemented")) +} diff --git a/api/gen/proto/go/metastore/v1/metastorev1connect/metastore.connect.mux.go b/api/gen/proto/go/metastore/v1/metastorev1connect/metastore.connect.mux.go new file mode 100644 index 0000000000..9d0c507a55 --- /dev/null +++ b/api/gen/proto/go/metastore/v1/metastorev1connect/metastore.connect.mux.go @@ -0,0 +1,37 @@ +// Code generated by protoc-gen-connect-go-mux. DO NOT EDIT. 
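
(Editorial sketch.) Mounting the handler returned by NewMetastoreServiceHandler above; the address is a placeholder, and the h2c wrapper is only needed when gRPC clients must reach the handler over plaintext HTTP/2:

package example

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/h2c"

	"github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/metastorev1connect"
)

func serve(svc metastorev1connect.MetastoreServiceHandler) {
	mux := http.NewServeMux()
	path, handler := metastorev1connect.NewMetastoreServiceHandler(svc)
	mux.Handle(path, handler)
	log.Fatal(http.ListenAndServe(":8080", h2c.NewHandler(mux, &http2.Server{})))
}
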
+// +// Source: metastore/v1/metastore.proto + +package metastorev1connect + +import ( + connect "connectrpc.com/connect" + mux "github.com/gorilla/mux" +) + +// This is a compile-time assertion to ensure that this generated file and the connect package are +// compatible. If you get a compiler error that this constant is not defined, this code was +// generated with a version of connect newer than the one compiled into your binary. You can fix the +// problem by either regenerating this code with an older version of connect or updating the connect +// version compiled into your binary. +const _ = connect.IsAtLeastVersion0_1_0 + +// RegisterMetastoreServiceHandler register an HTTP handler to a mux.Router from the service +// implementation. +func RegisterMetastoreServiceHandler(mux *mux.Router, svc MetastoreServiceHandler, opts ...connect.HandlerOption) { + mux.Handle("/metastore.v1.MetastoreService/AddBlock", connect.NewUnaryHandler( + "/metastore.v1.MetastoreService/AddBlock", + svc.AddBlock, + opts..., + )) + mux.Handle("/metastore.v1.MetastoreService/ListBlocksForQuery", connect.NewUnaryHandler( + "/metastore.v1.MetastoreService/ListBlocksForQuery", + svc.ListBlocksForQuery, + opts..., + )) + mux.Handle("/metastore.v1.MetastoreService/ReadIndex", connect.NewUnaryHandler( + "/metastore.v1.MetastoreService/ReadIndex", + svc.ReadIndex, + opts..., + )) +} diff --git a/api/gen/proto/go/querybackend/v1/querybackend.pb.go b/api/gen/proto/go/querybackend/v1/querybackend.pb.go new file mode 100644 index 0000000000..fdb37dbfc7 --- /dev/null +++ b/api/gen/proto/go/querybackend/v1/querybackend.pb.go @@ -0,0 +1,1627 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc (unknown) +// source: querybackend/v1/querybackend.proto + +package querybackendv1 + +import ( + _ "github.com/grafana/pyroscope/api/gen/proto/go/google/v1" + v1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + v11 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type QueryType int32 + +const ( + QueryType_QUERY_UNSPECIFIED QueryType = 0 + QueryType_QUERY_LABEL_NAMES QueryType = 1 + QueryType_QUERY_LABEL_VALUES QueryType = 2 + QueryType_QUERY_SERIES_LABELS QueryType = 3 + QueryType_QUERY_TIME_SERIES QueryType = 4 + QueryType_QUERY_TREE QueryType = 5 +) + +// Enum value maps for QueryType. 
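
(Editorial sketch.) RegisterMetastoreServiceHandler above exposes the same unary handlers for codebases that route through gorilla/mux rather than mounting the path returned by NewMetastoreServiceHandler:

package example

import (
	"github.com/gorilla/mux"

	"github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/metastorev1connect"
)

// registerOnRouter mounts the three metastore RPC routes on an existing router.
func registerOnRouter(r *mux.Router, svc metastorev1connect.MetastoreServiceHandler) {
	metastorev1connect.RegisterMetastoreServiceHandler(r, svc)
}
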
+var ( + QueryType_name = map[int32]string{ + 0: "QUERY_UNSPECIFIED", + 1: "QUERY_LABEL_NAMES", + 2: "QUERY_LABEL_VALUES", + 3: "QUERY_SERIES_LABELS", + 4: "QUERY_TIME_SERIES", + 5: "QUERY_TREE", + } + QueryType_value = map[string]int32{ + "QUERY_UNSPECIFIED": 0, + "QUERY_LABEL_NAMES": 1, + "QUERY_LABEL_VALUES": 2, + "QUERY_SERIES_LABELS": 3, + "QUERY_TIME_SERIES": 4, + "QUERY_TREE": 5, + } +) + +func (x QueryType) Enum() *QueryType { + p := new(QueryType) + *p = x + return p +} + +func (x QueryType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (QueryType) Descriptor() protoreflect.EnumDescriptor { + return file_querybackend_v1_querybackend_proto_enumTypes[0].Descriptor() +} + +func (QueryType) Type() protoreflect.EnumType { + return &file_querybackend_v1_querybackend_proto_enumTypes[0] +} + +func (x QueryType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use QueryType.Descriptor instead. +func (QueryType) EnumDescriptor() ([]byte, []int) { + return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{0} +} + +type ReportType int32 + +const ( + ReportType_REPORT_UNSPECIFIED ReportType = 0 + ReportType_REPORT_LABEL_NAMES ReportType = 1 + ReportType_REPORT_LABEL_VALUES ReportType = 2 + ReportType_REPORT_SERIES_LABELS ReportType = 3 + ReportType_REPORT_TIME_SERIES ReportType = 4 + ReportType_REPORT_TREE ReportType = 5 +) + +// Enum value maps for ReportType. +var ( + ReportType_name = map[int32]string{ + 0: "REPORT_UNSPECIFIED", + 1: "REPORT_LABEL_NAMES", + 2: "REPORT_LABEL_VALUES", + 3: "REPORT_SERIES_LABELS", + 4: "REPORT_TIME_SERIES", + 5: "REPORT_TREE", + } + ReportType_value = map[string]int32{ + "REPORT_UNSPECIFIED": 0, + "REPORT_LABEL_NAMES": 1, + "REPORT_LABEL_VALUES": 2, + "REPORT_SERIES_LABELS": 3, + "REPORT_TIME_SERIES": 4, + "REPORT_TREE": 5, + } +) + +func (x ReportType) Enum() *ReportType { + p := new(ReportType) + *p = x + return p +} + +func (x ReportType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ReportType) Descriptor() protoreflect.EnumDescriptor { + return file_querybackend_v1_querybackend_proto_enumTypes[1].Descriptor() +} + +func (ReportType) Type() protoreflect.EnumType { + return &file_querybackend_v1_querybackend_proto_enumTypes[1] +} + +func (x ReportType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ReportType.Descriptor instead. 
+func (ReportType) EnumDescriptor() ([]byte, []int) { + return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{1} +} + +type InvokeOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *InvokeOptions) Reset() { + *x = InvokeOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InvokeOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InvokeOptions) ProtoMessage() {} + +func (x *InvokeOptions) ProtoReflect() protoreflect.Message { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InvokeOptions.ProtoReflect.Descriptor instead. +func (*InvokeOptions) Descriptor() ([]byte, []int) { + return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{0} +} + +type InvokeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tenant []string `protobuf:"bytes,1,rep,name=tenant,proto3" json:"tenant,omitempty"` + StartTime int64 `protobuf:"varint,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + EndTime int64 `protobuf:"varint,3,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + LabelSelector string `protobuf:"bytes,4,opt,name=label_selector,json=labelSelector,proto3" json:"label_selector,omitempty"` + Query []*Query `protobuf:"bytes,5,rep,name=query,proto3" json:"query,omitempty"` + QueryPlan *QueryPlan `protobuf:"bytes,6,opt,name=query_plan,json=queryPlan,proto3" json:"query_plan,omitempty"` + Options *InvokeOptions `protobuf:"bytes,7,opt,name=options,proto3" json:"options,omitempty"` +} + +func (x *InvokeRequest) Reset() { + *x = InvokeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InvokeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InvokeRequest) ProtoMessage() {} + +func (x *InvokeRequest) ProtoReflect() protoreflect.Message { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InvokeRequest.ProtoReflect.Descriptor instead. 
+func (*InvokeRequest) Descriptor() ([]byte, []int) { + return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{1} +} + +func (x *InvokeRequest) GetTenant() []string { + if x != nil { + return x.Tenant + } + return nil +} + +func (x *InvokeRequest) GetStartTime() int64 { + if x != nil { + return x.StartTime + } + return 0 +} + +func (x *InvokeRequest) GetEndTime() int64 { + if x != nil { + return x.EndTime + } + return 0 +} + +func (x *InvokeRequest) GetLabelSelector() string { + if x != nil { + return x.LabelSelector + } + return "" +} + +func (x *InvokeRequest) GetQuery() []*Query { + if x != nil { + return x.Query + } + return nil +} + +func (x *InvokeRequest) GetQueryPlan() *QueryPlan { + if x != nil { + return x.QueryPlan + } + return nil +} + +func (x *InvokeRequest) GetOptions() *InvokeOptions { + if x != nil { + return x.Options + } + return nil +} + +// Query plan is represented by a DAG, where each node +// might be either "merge" or "read" (leaves). Each node +// references a range: merge nodes refer to other nodes, +// while read nodes refer to the blocks. +type QueryPlan struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Each node is encoded with 3 values: + // - node type: 0 - read, 1 - merge; + // - range offset; + // - range length. + Graph []uint32 `protobuf:"varint,1,rep,packed,name=graph,proto3" json:"graph,omitempty"` + // The blocks matching the query. + Blocks []*v1.BlockMeta `protobuf:"bytes,2,rep,name=blocks,proto3" json:"blocks,omitempty"` +} + +func (x *QueryPlan) Reset() { + *x = QueryPlan{} + if protoimpl.UnsafeEnabled { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryPlan) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryPlan) ProtoMessage() {} + +func (x *QueryPlan) ProtoReflect() protoreflect.Message { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryPlan.ProtoReflect.Descriptor instead. +func (*QueryPlan) Descriptor() ([]byte, []int) { + return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{2} +} + +func (x *QueryPlan) GetGraph() []uint32 { + if x != nil { + return x.Graph + } + return nil +} + +func (x *QueryPlan) GetBlocks() []*v1.BlockMeta { + if x != nil { + return x.Blocks + } + return nil +} + +type Query struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + QueryType QueryType `protobuf:"varint,1,opt,name=query_type,json=queryType,proto3,enum=querybackend.v1.QueryType" json:"query_type,omitempty"` + // Exactly one of the following fields should be set, + // depending on the query type. 
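
(Editorial illustration.) The Graph field above flattens the plan DAG into consecutive (node type, range offset, range length) triples, with merge nodes pointing at other nodes and read nodes pointing at Blocks. The layout below is one plausible reading of that comment, not something stated in the patch:

package example

import (
	metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1"
	querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1"
)

// examplePlan sketches a plan that merges two read leaves covering four blocks.
// The offset semantics (node indices for merge, block indices for read) are assumed.
func examplePlan(blocks []*metastorev1.BlockMeta) *querybackendv1.QueryPlan {
	return &querybackendv1.QueryPlan{
		Graph: []uint32{
			1, 1, 2, // node 0: merge, children = nodes [1, 3)
			0, 0, 2, // node 1: read,  blocks   = blocks[0:2]
			0, 2, 2, // node 2: read,  blocks   = blocks[2:4]
		},
		Blocks: blocks, // four *metastorev1.BlockMeta entries in this illustration
	}
}
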
+ LabelNames *LabelNamesQuery `protobuf:"bytes,2,opt,name=label_names,json=labelNames,proto3" json:"label_names,omitempty"` + LabelValues *LabelValuesQuery `protobuf:"bytes,3,opt,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` + SeriesLabels *SeriesLabelsQuery `protobuf:"bytes,4,opt,name=series_labels,json=seriesLabels,proto3" json:"series_labels,omitempty"` + TimeSeries *TimeSeriesQuery `protobuf:"bytes,5,opt,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` + Tree *TreeQuery `protobuf:"bytes,6,opt,name=tree,proto3" json:"tree,omitempty"` +} + +func (x *Query) Reset() { + *x = Query{} + if protoimpl.UnsafeEnabled { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Query) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Query) ProtoMessage() {} + +func (x *Query) ProtoReflect() protoreflect.Message { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Query.ProtoReflect.Descriptor instead. +func (*Query) Descriptor() ([]byte, []int) { + return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{3} +} + +func (x *Query) GetQueryType() QueryType { + if x != nil { + return x.QueryType + } + return QueryType_QUERY_UNSPECIFIED +} + +func (x *Query) GetLabelNames() *LabelNamesQuery { + if x != nil { + return x.LabelNames + } + return nil +} + +func (x *Query) GetLabelValues() *LabelValuesQuery { + if x != nil { + return x.LabelValues + } + return nil +} + +func (x *Query) GetSeriesLabels() *SeriesLabelsQuery { + if x != nil { + return x.SeriesLabels + } + return nil +} + +func (x *Query) GetTimeSeries() *TimeSeriesQuery { + if x != nil { + return x.TimeSeries + } + return nil +} + +func (x *Query) GetTree() *TreeQuery { + if x != nil { + return x.Tree + } + return nil +} + +type InvokeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Reports []*Report `protobuf:"bytes,1,rep,name=reports,proto3" json:"reports,omitempty"` + Diagnostics *Diagnostics `protobuf:"bytes,2,opt,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *InvokeResponse) Reset() { + *x = InvokeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InvokeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InvokeResponse) ProtoMessage() {} + +func (x *InvokeResponse) ProtoReflect() protoreflect.Message { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InvokeResponse.ProtoReflect.Descriptor instead. 
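
(Editorial sketch.) Building an InvokeRequest with a single tree query from the enum and oneof-style fields defined above; the tenant, selector, and millisecond timestamps are placeholders, and TreeQuery's own fields are not shown in this excerpt:

package example

import (
	"time"

	querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1"
)

func treeQueryRequest(start, end time.Time, plan *querybackendv1.QueryPlan) *querybackendv1.InvokeRequest {
	return &querybackendv1.InvokeRequest{
		Tenant:        []string{"tenant-a"},                // placeholder tenant
		StartTime:     start.UnixMilli(),                   // assuming millisecond timestamps
		EndTime:       end.UnixMilli(),
		LabelSelector: `{service_name="ride-sharing-app"}`, // placeholder selector
		Query: []*querybackendv1.Query{{
			QueryType: querybackendv1.QueryType_QUERY_TREE,
			Tree:      &querybackendv1.TreeQuery{}, // fields omitted; message defined later in the generated file
		}},
		QueryPlan: plan,
	}
}
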
+func (*InvokeResponse) Descriptor() ([]byte, []int) { + return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{4} +} + +func (x *InvokeResponse) GetReports() []*Report { + if x != nil { + return x.Reports + } + return nil +} + +func (x *InvokeResponse) GetDiagnostics() *Diagnostics { + if x != nil { + return x.Diagnostics + } + return nil +} + +// Diagnostic messages, events, statistics, analytics, etc. +type Diagnostics struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Diagnostics) Reset() { + *x = Diagnostics{} + if protoimpl.UnsafeEnabled { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Diagnostics) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Diagnostics) ProtoMessage() {} + +func (x *Diagnostics) ProtoReflect() protoreflect.Message { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Diagnostics.ProtoReflect.Descriptor instead. +func (*Diagnostics) Descriptor() ([]byte, []int) { + return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{5} +} + +type Report struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ReportType ReportType `protobuf:"varint,1,opt,name=report_type,json=reportType,proto3,enum=querybackend.v1.ReportType" json:"report_type,omitempty"` + // Exactly one of the following fields should be set, + // depending on the report type. + LabelNames *LabelNamesReport `protobuf:"bytes,2,opt,name=label_names,json=labelNames,proto3" json:"label_names,omitempty"` + LabelValues *LabelValuesReport `protobuf:"bytes,3,opt,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` + SeriesLabels *SeriesLabelsReport `protobuf:"bytes,4,opt,name=series_labels,json=seriesLabels,proto3" json:"series_labels,omitempty"` + TimeSeries *TimeSeriesReport `protobuf:"bytes,5,opt,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` + Tree *TreeReport `protobuf:"bytes,6,opt,name=tree,proto3" json:"tree,omitempty"` +} + +func (x *Report) Reset() { + *x = Report{} + if protoimpl.UnsafeEnabled { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Report) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Report) ProtoMessage() {} + +func (x *Report) ProtoReflect() protoreflect.Message { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Report.ProtoReflect.Descriptor instead. 
+func (*Report) Descriptor() ([]byte, []int) { + return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{6} +} + +func (x *Report) GetReportType() ReportType { + if x != nil { + return x.ReportType + } + return ReportType_REPORT_UNSPECIFIED +} + +func (x *Report) GetLabelNames() *LabelNamesReport { + if x != nil { + return x.LabelNames + } + return nil +} + +func (x *Report) GetLabelValues() *LabelValuesReport { + if x != nil { + return x.LabelValues + } + return nil +} + +func (x *Report) GetSeriesLabels() *SeriesLabelsReport { + if x != nil { + return x.SeriesLabels + } + return nil +} + +func (x *Report) GetTimeSeries() *TimeSeriesReport { + if x != nil { + return x.TimeSeries + } + return nil +} + +func (x *Report) GetTree() *TreeReport { + if x != nil { + return x.Tree + } + return nil +} + +type LabelNamesQuery struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *LabelNamesQuery) Reset() { + *x = LabelNamesQuery{} + if protoimpl.UnsafeEnabled { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LabelNamesQuery) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LabelNamesQuery) ProtoMessage() {} + +func (x *LabelNamesQuery) ProtoReflect() protoreflect.Message { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LabelNamesQuery.ProtoReflect.Descriptor instead. +func (*LabelNamesQuery) Descriptor() ([]byte, []int) { + return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{7} +} + +type LabelNamesReport struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Query *LabelNamesQuery `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + LabelNames []string `protobuf:"bytes,2,rep,name=label_names,json=labelNames,proto3" json:"label_names,omitempty"` +} + +func (x *LabelNamesReport) Reset() { + *x = LabelNamesReport{} + if protoimpl.UnsafeEnabled { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LabelNamesReport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LabelNamesReport) ProtoMessage() {} + +func (x *LabelNamesReport) ProtoReflect() protoreflect.Message { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LabelNamesReport.ProtoReflect.Descriptor instead. 
+func (*LabelNamesReport) Descriptor() ([]byte, []int) { + return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{8} +} + +func (x *LabelNamesReport) GetQuery() *LabelNamesQuery { + if x != nil { + return x.Query + } + return nil +} + +func (x *LabelNamesReport) GetLabelNames() []string { + if x != nil { + return x.LabelNames + } + return nil +} + +type LabelValuesQuery struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LabelName string `protobuf:"bytes,1,opt,name=label_name,json=labelName,proto3" json:"label_name,omitempty"` +} + +func (x *LabelValuesQuery) Reset() { + *x = LabelValuesQuery{} + if protoimpl.UnsafeEnabled { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LabelValuesQuery) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LabelValuesQuery) ProtoMessage() {} + +func (x *LabelValuesQuery) ProtoReflect() protoreflect.Message { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LabelValuesQuery.ProtoReflect.Descriptor instead. +func (*LabelValuesQuery) Descriptor() ([]byte, []int) { + return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{9} +} + +func (x *LabelValuesQuery) GetLabelName() string { + if x != nil { + return x.LabelName + } + return "" +} + +type LabelValuesReport struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Query *LabelValuesQuery `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + LabelValues []string `protobuf:"bytes,2,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` +} + +func (x *LabelValuesReport) Reset() { + *x = LabelValuesReport{} + if protoimpl.UnsafeEnabled { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LabelValuesReport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LabelValuesReport) ProtoMessage() {} + +func (x *LabelValuesReport) ProtoReflect() protoreflect.Message { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LabelValuesReport.ProtoReflect.Descriptor instead. 
+func (*LabelValuesReport) Descriptor() ([]byte, []int) { + return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{10} +} + +func (x *LabelValuesReport) GetQuery() *LabelValuesQuery { + if x != nil { + return x.Query + } + return nil +} + +func (x *LabelValuesReport) GetLabelValues() []string { + if x != nil { + return x.LabelValues + } + return nil +} + +type SeriesLabelsQuery struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LabelNames []string `protobuf:"bytes,1,rep,name=label_names,json=labelNames,proto3" json:"label_names,omitempty"` +} + +func (x *SeriesLabelsQuery) Reset() { + *x = SeriesLabelsQuery{} + if protoimpl.UnsafeEnabled { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SeriesLabelsQuery) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SeriesLabelsQuery) ProtoMessage() {} + +func (x *SeriesLabelsQuery) ProtoReflect() protoreflect.Message { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SeriesLabelsQuery.ProtoReflect.Descriptor instead. +func (*SeriesLabelsQuery) Descriptor() ([]byte, []int) { + return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{11} +} + +func (x *SeriesLabelsQuery) GetLabelNames() []string { + if x != nil { + return x.LabelNames + } + return nil +} + +type SeriesLabelsReport struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Query *SeriesLabelsQuery `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + SeriesLabels []*v11.Labels `protobuf:"bytes,2,rep,name=series_labels,json=seriesLabels,proto3" json:"series_labels,omitempty"` +} + +func (x *SeriesLabelsReport) Reset() { + *x = SeriesLabelsReport{} + if protoimpl.UnsafeEnabled { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SeriesLabelsReport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SeriesLabelsReport) ProtoMessage() {} + +func (x *SeriesLabelsReport) ProtoReflect() protoreflect.Message { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SeriesLabelsReport.ProtoReflect.Descriptor instead. 
+func (*SeriesLabelsReport) Descriptor() ([]byte, []int) { + return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{12} +} + +func (x *SeriesLabelsReport) GetQuery() *SeriesLabelsQuery { + if x != nil { + return x.Query + } + return nil +} + +func (x *SeriesLabelsReport) GetSeriesLabels() []*v11.Labels { + if x != nil { + return x.SeriesLabels + } + return nil +} + +type TimeSeriesQuery struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Step float64 `protobuf:"fixed64,1,opt,name=step,proto3" json:"step,omitempty"` + GroupBy []string `protobuf:"bytes,2,rep,name=group_by,json=groupBy,proto3" json:"group_by,omitempty"` + Aggregation *v11.TimeSeriesAggregationType `protobuf:"varint,3,opt,name=aggregation,proto3,enum=types.v1.TimeSeriesAggregationType,oneof" json:"aggregation,omitempty"` +} + +func (x *TimeSeriesQuery) Reset() { + *x = TimeSeriesQuery{} + if protoimpl.UnsafeEnabled { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeSeriesQuery) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeSeriesQuery) ProtoMessage() {} + +func (x *TimeSeriesQuery) ProtoReflect() protoreflect.Message { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeSeriesQuery.ProtoReflect.Descriptor instead. +func (*TimeSeriesQuery) Descriptor() ([]byte, []int) { + return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{13} +} + +func (x *TimeSeriesQuery) GetStep() float64 { + if x != nil { + return x.Step + } + return 0 +} + +func (x *TimeSeriesQuery) GetGroupBy() []string { + if x != nil { + return x.GroupBy + } + return nil +} + +func (x *TimeSeriesQuery) GetAggregation() v11.TimeSeriesAggregationType { + if x != nil && x.Aggregation != nil { + return *x.Aggregation + } + return v11.TimeSeriesAggregationType(0) +} + +type TimeSeriesReport struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Query *TimeSeriesQuery `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + TimeSeries []*v11.Series `protobuf:"bytes,2,rep,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` +} + +func (x *TimeSeriesReport) Reset() { + *x = TimeSeriesReport{} + if protoimpl.UnsafeEnabled { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeSeriesReport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeSeriesReport) ProtoMessage() {} + +func (x *TimeSeriesReport) ProtoReflect() protoreflect.Message { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeSeriesReport.ProtoReflect.Descriptor instead. 
+func (*TimeSeriesReport) Descriptor() ([]byte, []int) { + return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{14} +} + +func (x *TimeSeriesReport) GetQuery() *TimeSeriesQuery { + if x != nil { + return x.Query + } + return nil +} + +func (x *TimeSeriesReport) GetTimeSeries() []*v11.Series { + if x != nil { + return x.TimeSeries + } + return nil +} + +type TreeQuery struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MaxNodes int64 `protobuf:"varint,1,opt,name=max_nodes,json=maxNodes,proto3" json:"max_nodes,omitempty"` +} + +func (x *TreeQuery) Reset() { + *x = TreeQuery{} + if protoimpl.UnsafeEnabled { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TreeQuery) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TreeQuery) ProtoMessage() {} + +func (x *TreeQuery) ProtoReflect() protoreflect.Message { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TreeQuery.ProtoReflect.Descriptor instead. +func (*TreeQuery) Descriptor() ([]byte, []int) { + return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{15} +} + +func (x *TreeQuery) GetMaxNodes() int64 { + if x != nil { + return x.MaxNodes + } + return 0 +} + +type TreeReport struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Query *TreeQuery `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + Tree []byte `protobuf:"bytes,2,opt,name=tree,proto3" json:"tree,omitempty"` +} + +func (x *TreeReport) Reset() { + *x = TreeReport{} + if protoimpl.UnsafeEnabled { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TreeReport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TreeReport) ProtoMessage() {} + +func (x *TreeReport) ProtoReflect() protoreflect.Message { + mi := &file_querybackend_v1_querybackend_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TreeReport.ProtoReflect.Descriptor instead. 
+func (*TreeReport) Descriptor() ([]byte, []int) { + return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{16} +} + +func (x *TreeReport) GetQuery() *TreeQuery { + if x != nil { + return x.Query + } + return nil +} + +func (x *TreeReport) GetTree() []byte { + if x != nil { + return x.Tree + } + return nil +} + +var File_querybackend_v1_querybackend_proto protoreflect.FileDescriptor + +var file_querybackend_v1_querybackend_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2f, 0x76, + 0x31, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, + 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x76, 0x31, + 0x2f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, + 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x74, + 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x0f, 0x0a, 0x0d, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x22, 0xab, 0x02, 0x0a, 0x0d, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x12, 0x1d, 0x0a, + 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, + 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, + 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x2c, + 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x39, 0x0a, 0x0a, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, + 0x76, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x09, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x50, 0x6c, 0x61, 0x6e, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x22, 0x52, 0x0a, 0x09, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x6c, 0x61, 0x6e, 0x12, 0x14, + 0x0a, 0x05, 0x67, 0x72, 0x61, 0x70, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x05, 0x67, + 0x72, 0x61, 0x70, 0x68, 0x12, 0x2f, 0x0a, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 
0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x06, 0x62, + 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x22, 0x87, 0x03, 0x0a, 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, + 0x39, 0x0a, 0x0a, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, + 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x09, 0x71, 0x75, 0x65, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x0b, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, + 0x31, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x52, 0x0a, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x44, 0x0a, + 0x0c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, + 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x0b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x73, 0x12, 0x47, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, + 0x69, 0x65, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x0c, + 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x41, 0x0a, 0x0b, + 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x20, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, + 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, + 0x2e, 0x0a, 0x04, 0x74, 0x72, 0x65, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, + 0x54, 0x72, 0x65, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x04, 0x74, 0x72, 0x65, 0x65, 0x22, + 0x83, 0x01, 0x0a, 0x0e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x31, 0x0a, 0x07, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, + 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x07, 0x72, 0x65, + 0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x3e, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x0d, 0x0a, 0x0b, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x73, 0x22, 0x90, 0x03, 0x0a, 0x06, 0x52, 0x65, 0x70, 
0x6f, 0x72, 0x74, 0x12, + 0x3c, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, + 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x52, 0x0a, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x42, 0x0a, + 0x0b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, + 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0a, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x12, 0x45, 0x0a, 0x0c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, + 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0b, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, + 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0c, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x12, 0x42, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, + 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, + 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, + 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x04, 0x74, 0x72, 0x65, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, + 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x70, 0x6f, 0x72, + 0x74, 0x52, 0x04, 0x74, 0x72, 0x65, 0x65, 0x22, 0x11, 0x0a, 0x0f, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x22, 0x6b, 0x0a, 0x10, 0x4c, 0x61, + 0x62, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x36, + 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, + 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x31, 0x0a, 0x10, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x6f, 0x0a, 0x11, 0x4c, 0x61, + 
0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, + 0x37, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, + 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, + 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x34, 0x0a, 0x11, 0x53, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x22, 0x85, 0x01, 0x0a, 0x12, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x38, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, + 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x12, 0x35, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x0c, 0x73, 0x65, 0x72, + 0x69, 0x65, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x22, 0x9c, 0x01, 0x0a, 0x0f, 0x54, 0x69, + 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x12, 0x0a, + 0x04, 0x73, 0x74, 0x65, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x04, 0x73, 0x74, 0x65, + 0x70, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x62, 0x79, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x07, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x79, 0x12, 0x4a, 0x0a, 0x0b, + 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x23, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x61, 0x67, 0x67, + 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x7d, 0x0a, 0x10, 0x54, 0x69, 0x6d, 0x65, + 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x36, 0x0a, 0x05, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x12, 0x31, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x74, 0x69, 0x6d, + 0x65, 0x53, 0x65, 0x72, 
0x69, 0x65, 0x73, 0x22, 0x28, 0x0a, 0x09, 0x54, 0x72, 0x65, 0x65, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x6f, 0x64, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x4e, 0x6f, 0x64, 0x65, + 0x73, 0x22, 0x52, 0x0a, 0x0a, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, + 0x30, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, + 0x2e, 0x54, 0x72, 0x65, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x72, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x04, 0x74, 0x72, 0x65, 0x65, 0x2a, 0x91, 0x01, 0x0a, 0x09, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x55, 0x4e, 0x53, + 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x51, 0x55, + 0x45, 0x52, 0x59, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x53, 0x10, + 0x01, 0x12, 0x16, 0x0a, 0x12, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, + 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x51, 0x55, 0x45, + 0x52, 0x59, 0x5f, 0x53, 0x45, 0x52, 0x49, 0x45, 0x53, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, + 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x54, 0x49, 0x4d, 0x45, + 0x5f, 0x53, 0x45, 0x52, 0x49, 0x45, 0x53, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x51, 0x55, 0x45, + 0x52, 0x59, 0x5f, 0x54, 0x52, 0x45, 0x45, 0x10, 0x05, 0x2a, 0x98, 0x01, 0x0a, 0x0a, 0x52, 0x65, + 0x70, 0x6f, 0x72, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x50, 0x4f, + 0x52, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, + 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x53, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x52, 0x45, 0x50, 0x4f, + 0x52, 0x54, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x10, + 0x02, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x49, + 0x45, 0x53, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x52, + 0x45, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x49, 0x45, + 0x53, 0x10, 0x04, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x54, 0x52, + 0x45, 0x45, 0x10, 0x05, 0x32, 0x62, 0x0a, 0x13, 0x51, 0x75, 0x65, 0x72, 0x79, 0x42, 0x61, 0x63, + 0x6b, 0x65, 0x6e, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4b, 0x0a, 0x06, 0x49, + 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x12, 0x1e, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, + 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, + 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0xd3, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, + 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, + 0x42, 0x11, 0x51, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4c, 0x67, 
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x72, 0x61, 0x66, 0x61, 0x6e, 0x61, 0x2f, 0x70, 0x79, 0x72, 0x6f, 0x73, 0x63, + 0x6f, 0x70, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x67, 0x6f, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, + 0x64, 0x2f, 0x76, 0x31, 0x3b, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, + 0x64, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x51, 0x58, 0x58, 0xaa, 0x02, 0x0f, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x0f, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x1b, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5c, 0x56, 0x31, 0x5c, + 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x10, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_querybackend_v1_querybackend_proto_rawDescOnce sync.Once + file_querybackend_v1_querybackend_proto_rawDescData = file_querybackend_v1_querybackend_proto_rawDesc +) + +func file_querybackend_v1_querybackend_proto_rawDescGZIP() []byte { + file_querybackend_v1_querybackend_proto_rawDescOnce.Do(func() { + file_querybackend_v1_querybackend_proto_rawDescData = protoimpl.X.CompressGZIP(file_querybackend_v1_querybackend_proto_rawDescData) + }) + return file_querybackend_v1_querybackend_proto_rawDescData +} + +var file_querybackend_v1_querybackend_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_querybackend_v1_querybackend_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_querybackend_v1_querybackend_proto_goTypes = []any{ + (QueryType)(0), // 0: querybackend.v1.QueryType + (ReportType)(0), // 1: querybackend.v1.ReportType + (*InvokeOptions)(nil), // 2: querybackend.v1.InvokeOptions + (*InvokeRequest)(nil), // 3: querybackend.v1.InvokeRequest + (*QueryPlan)(nil), // 4: querybackend.v1.QueryPlan + (*Query)(nil), // 5: querybackend.v1.Query + (*InvokeResponse)(nil), // 6: querybackend.v1.InvokeResponse + (*Diagnostics)(nil), // 7: querybackend.v1.Diagnostics + (*Report)(nil), // 8: querybackend.v1.Report + (*LabelNamesQuery)(nil), // 9: querybackend.v1.LabelNamesQuery + (*LabelNamesReport)(nil), // 10: querybackend.v1.LabelNamesReport + (*LabelValuesQuery)(nil), // 11: querybackend.v1.LabelValuesQuery + (*LabelValuesReport)(nil), // 12: querybackend.v1.LabelValuesReport + (*SeriesLabelsQuery)(nil), // 13: querybackend.v1.SeriesLabelsQuery + (*SeriesLabelsReport)(nil), // 14: querybackend.v1.SeriesLabelsReport + (*TimeSeriesQuery)(nil), // 15: querybackend.v1.TimeSeriesQuery + (*TimeSeriesReport)(nil), // 16: querybackend.v1.TimeSeriesReport + (*TreeQuery)(nil), // 17: querybackend.v1.TreeQuery + (*TreeReport)(nil), // 18: querybackend.v1.TreeReport + (*v1.BlockMeta)(nil), // 19: metastore.v1.BlockMeta + (*v11.Labels)(nil), // 20: types.v1.Labels + (v11.TimeSeriesAggregationType)(0), // 21: types.v1.TimeSeriesAggregationType + (*v11.Series)(nil), // 22: types.v1.Series +} +var file_querybackend_v1_querybackend_proto_depIdxs = []int32{ + 5, // 0: querybackend.v1.InvokeRequest.query:type_name -> querybackend.v1.Query + 4, // 1: querybackend.v1.InvokeRequest.query_plan:type_name -> querybackend.v1.QueryPlan + 2, // 2: querybackend.v1.InvokeRequest.options:type_name -> querybackend.v1.InvokeOptions + 19, // 3: 
querybackend.v1.QueryPlan.blocks:type_name -> metastore.v1.BlockMeta + 0, // 4: querybackend.v1.Query.query_type:type_name -> querybackend.v1.QueryType + 9, // 5: querybackend.v1.Query.label_names:type_name -> querybackend.v1.LabelNamesQuery + 11, // 6: querybackend.v1.Query.label_values:type_name -> querybackend.v1.LabelValuesQuery + 13, // 7: querybackend.v1.Query.series_labels:type_name -> querybackend.v1.SeriesLabelsQuery + 15, // 8: querybackend.v1.Query.time_series:type_name -> querybackend.v1.TimeSeriesQuery + 17, // 9: querybackend.v1.Query.tree:type_name -> querybackend.v1.TreeQuery + 8, // 10: querybackend.v1.InvokeResponse.reports:type_name -> querybackend.v1.Report + 7, // 11: querybackend.v1.InvokeResponse.diagnostics:type_name -> querybackend.v1.Diagnostics + 1, // 12: querybackend.v1.Report.report_type:type_name -> querybackend.v1.ReportType + 10, // 13: querybackend.v1.Report.label_names:type_name -> querybackend.v1.LabelNamesReport + 12, // 14: querybackend.v1.Report.label_values:type_name -> querybackend.v1.LabelValuesReport + 14, // 15: querybackend.v1.Report.series_labels:type_name -> querybackend.v1.SeriesLabelsReport + 16, // 16: querybackend.v1.Report.time_series:type_name -> querybackend.v1.TimeSeriesReport + 18, // 17: querybackend.v1.Report.tree:type_name -> querybackend.v1.TreeReport + 9, // 18: querybackend.v1.LabelNamesReport.query:type_name -> querybackend.v1.LabelNamesQuery + 11, // 19: querybackend.v1.LabelValuesReport.query:type_name -> querybackend.v1.LabelValuesQuery + 13, // 20: querybackend.v1.SeriesLabelsReport.query:type_name -> querybackend.v1.SeriesLabelsQuery + 20, // 21: querybackend.v1.SeriesLabelsReport.series_labels:type_name -> types.v1.Labels + 21, // 22: querybackend.v1.TimeSeriesQuery.aggregation:type_name -> types.v1.TimeSeriesAggregationType + 15, // 23: querybackend.v1.TimeSeriesReport.query:type_name -> querybackend.v1.TimeSeriesQuery + 22, // 24: querybackend.v1.TimeSeriesReport.time_series:type_name -> types.v1.Series + 17, // 25: querybackend.v1.TreeReport.query:type_name -> querybackend.v1.TreeQuery + 3, // 26: querybackend.v1.QueryBackendService.Invoke:input_type -> querybackend.v1.InvokeRequest + 6, // 27: querybackend.v1.QueryBackendService.Invoke:output_type -> querybackend.v1.InvokeResponse + 27, // [27:28] is the sub-list for method output_type + 26, // [26:27] is the sub-list for method input_type + 26, // [26:26] is the sub-list for extension type_name + 26, // [26:26] is the sub-list for extension extendee + 0, // [0:26] is the sub-list for field type_name +} + +func init() { file_querybackend_v1_querybackend_proto_init() } +func file_querybackend_v1_querybackend_proto_init() { + if File_querybackend_v1_querybackend_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_querybackend_v1_querybackend_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*InvokeOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_querybackend_v1_querybackend_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*InvokeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_querybackend_v1_querybackend_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*QueryPlan); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } 
+ file_querybackend_v1_querybackend_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*Query); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_querybackend_v1_querybackend_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*InvokeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_querybackend_v1_querybackend_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*Diagnostics); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_querybackend_v1_querybackend_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*Report); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_querybackend_v1_querybackend_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*LabelNamesQuery); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_querybackend_v1_querybackend_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*LabelNamesReport); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_querybackend_v1_querybackend_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*LabelValuesQuery); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_querybackend_v1_querybackend_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*LabelValuesReport); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_querybackend_v1_querybackend_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*SeriesLabelsQuery); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_querybackend_v1_querybackend_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*SeriesLabelsReport); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_querybackend_v1_querybackend_proto_msgTypes[13].Exporter = func(v any, i int) any { + switch v := v.(*TimeSeriesQuery); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_querybackend_v1_querybackend_proto_msgTypes[14].Exporter = func(v any, i int) any { + switch v := v.(*TimeSeriesReport); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_querybackend_v1_querybackend_proto_msgTypes[15].Exporter = func(v any, i int) any { + switch v := v.(*TreeQuery); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_querybackend_v1_querybackend_proto_msgTypes[16].Exporter = func(v any, i int) any { + switch v := v.(*TreeReport); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + } + file_querybackend_v1_querybackend_proto_msgTypes[13].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_querybackend_v1_querybackend_proto_rawDesc, + NumEnums: 2, + NumMessages: 17, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_querybackend_v1_querybackend_proto_goTypes, + DependencyIndexes: file_querybackend_v1_querybackend_proto_depIdxs, + EnumInfos: file_querybackend_v1_querybackend_proto_enumTypes, + MessageInfos: file_querybackend_v1_querybackend_proto_msgTypes, + }.Build() + File_querybackend_v1_querybackend_proto = out.File + file_querybackend_v1_querybackend_proto_rawDesc = nil + file_querybackend_v1_querybackend_proto_goTypes = nil + file_querybackend_v1_querybackend_proto_depIdxs = nil +} diff --git a/api/gen/proto/go/querybackend/v1/querybackend_vtproto.pb.go b/api/gen/proto/go/querybackend/v1/querybackend_vtproto.pb.go new file mode 100644 index 0000000000..1e5ac09fe1 --- /dev/null +++ b/api/gen/proto/go/querybackend/v1/querybackend_vtproto.pb.go @@ -0,0 +1,4500 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.6.0 +// source: querybackend/v1/querybackend.proto + +package querybackendv1 + +import ( + context "context" + binary "encoding/binary" + fmt "fmt" + v1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + v11 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *InvokeOptions) CloneVT() *InvokeOptions { + if m == nil { + return (*InvokeOptions)(nil) + } + r := new(InvokeOptions) + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *InvokeOptions) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *InvokeRequest) CloneVT() *InvokeRequest { + if m == nil { + return (*InvokeRequest)(nil) + } + r := new(InvokeRequest) + r.StartTime = m.StartTime + r.EndTime = m.EndTime + r.LabelSelector = m.LabelSelector + r.QueryPlan = m.QueryPlan.CloneVT() + r.Options = m.Options.CloneVT() + if rhs := m.Tenant; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Tenant = tmpContainer + } + if rhs := m.Query; rhs != nil { + tmpContainer := make([]*Query, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Query = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *InvokeRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *QueryPlan) CloneVT() *QueryPlan { + if m == nil { + return (*QueryPlan)(nil) + } + r := new(QueryPlan) + if rhs := m.Graph; rhs != nil { + tmpContainer := make([]uint32, len(rhs)) + copy(tmpContainer, rhs) + r.Graph = tmpContainer + } + if rhs := m.Blocks; rhs != nil { + tmpContainer := make([]*v1.BlockMeta, len(rhs)) + for k, v := range rhs { + if vtpb, ok := interface{}(v).(interface{ CloneVT() *v1.BlockMeta }); ok { + tmpContainer[k] = vtpb.CloneVT() + } else { + tmpContainer[k] = proto.Clone(v).(*v1.BlockMeta) + } + } + r.Blocks = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *QueryPlan) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Query) CloneVT() *Query { + if m == nil { + return (*Query)(nil) + } + r := new(Query) + r.QueryType = m.QueryType + r.LabelNames = m.LabelNames.CloneVT() + r.LabelValues = m.LabelValues.CloneVT() + r.SeriesLabels = m.SeriesLabels.CloneVT() + r.TimeSeries = m.TimeSeries.CloneVT() + r.Tree = m.Tree.CloneVT() + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Query) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *InvokeResponse) CloneVT() *InvokeResponse { + if m == nil { + return (*InvokeResponse)(nil) + } + r := new(InvokeResponse) + r.Diagnostics = m.Diagnostics.CloneVT() + if rhs := m.Reports; rhs != nil { + tmpContainer := make([]*Report, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Reports = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *InvokeResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Diagnostics) CloneVT() *Diagnostics { + if m == nil { + return (*Diagnostics)(nil) + } + r := new(Diagnostics) + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Diagnostics) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Report) CloneVT() *Report 
{ + if m == nil { + return (*Report)(nil) + } + r := new(Report) + r.ReportType = m.ReportType + r.LabelNames = m.LabelNames.CloneVT() + r.LabelValues = m.LabelValues.CloneVT() + r.SeriesLabels = m.SeriesLabels.CloneVT() + r.TimeSeries = m.TimeSeries.CloneVT() + r.Tree = m.Tree.CloneVT() + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Report) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *LabelNamesQuery) CloneVT() *LabelNamesQuery { + if m == nil { + return (*LabelNamesQuery)(nil) + } + r := new(LabelNamesQuery) + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *LabelNamesQuery) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *LabelNamesReport) CloneVT() *LabelNamesReport { + if m == nil { + return (*LabelNamesReport)(nil) + } + r := new(LabelNamesReport) + r.Query = m.Query.CloneVT() + if rhs := m.LabelNames; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.LabelNames = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *LabelNamesReport) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *LabelValuesQuery) CloneVT() *LabelValuesQuery { + if m == nil { + return (*LabelValuesQuery)(nil) + } + r := new(LabelValuesQuery) + r.LabelName = m.LabelName + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *LabelValuesQuery) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *LabelValuesReport) CloneVT() *LabelValuesReport { + if m == nil { + return (*LabelValuesReport)(nil) + } + r := new(LabelValuesReport) + r.Query = m.Query.CloneVT() + if rhs := m.LabelValues; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.LabelValues = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *LabelValuesReport) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SeriesLabelsQuery) CloneVT() *SeriesLabelsQuery { + if m == nil { + return (*SeriesLabelsQuery)(nil) + } + r := new(SeriesLabelsQuery) + if rhs := m.LabelNames; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.LabelNames = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SeriesLabelsQuery) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SeriesLabelsReport) CloneVT() *SeriesLabelsReport { + if m == nil { + return (*SeriesLabelsReport)(nil) + } + r := new(SeriesLabelsReport) + r.Query = m.Query.CloneVT() + if rhs := m.SeriesLabels; rhs != nil { + tmpContainer := make([]*v11.Labels, len(rhs)) + for k, v := range rhs { + if vtpb, ok := interface{}(v).(interface{ CloneVT() *v11.Labels }); ok { + tmpContainer[k] = vtpb.CloneVT() + } else { + tmpContainer[k] = proto.Clone(v).(*v11.Labels) + } + } + r.SeriesLabels = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } 
+ return r +} + +func (m *SeriesLabelsReport) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TimeSeriesQuery) CloneVT() *TimeSeriesQuery { + if m == nil { + return (*TimeSeriesQuery)(nil) + } + r := new(TimeSeriesQuery) + r.Step = m.Step + if rhs := m.GroupBy; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.GroupBy = tmpContainer + } + if rhs := m.Aggregation; rhs != nil { + tmpVal := *rhs + r.Aggregation = &tmpVal + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *TimeSeriesQuery) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TimeSeriesReport) CloneVT() *TimeSeriesReport { + if m == nil { + return (*TimeSeriesReport)(nil) + } + r := new(TimeSeriesReport) + r.Query = m.Query.CloneVT() + if rhs := m.TimeSeries; rhs != nil { + tmpContainer := make([]*v11.Series, len(rhs)) + for k, v := range rhs { + if vtpb, ok := interface{}(v).(interface{ CloneVT() *v11.Series }); ok { + tmpContainer[k] = vtpb.CloneVT() + } else { + tmpContainer[k] = proto.Clone(v).(*v11.Series) + } + } + r.TimeSeries = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *TimeSeriesReport) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TreeQuery) CloneVT() *TreeQuery { + if m == nil { + return (*TreeQuery)(nil) + } + r := new(TreeQuery) + r.MaxNodes = m.MaxNodes + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *TreeQuery) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TreeReport) CloneVT() *TreeReport { + if m == nil { + return (*TreeReport)(nil) + } + r := new(TreeReport) + r.Query = m.Query.CloneVT() + if rhs := m.Tree; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Tree = tmpBytes + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *TreeReport) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (this *InvokeOptions) EqualVT(that *InvokeOptions) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *InvokeOptions) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*InvokeOptions) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *InvokeRequest) EqualVT(that *InvokeRequest) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if len(this.Tenant) != len(that.Tenant) { + return false + } + for i, vx := range this.Tenant { + vy := that.Tenant[i] + if vx != vy { + return false + } + } + if this.StartTime != that.StartTime { + return false + } + if this.EndTime != that.EndTime { + return false + } + if this.LabelSelector != that.LabelSelector { + return false + } + if len(this.Query) != len(that.Query) { + return false + } + for i, vx := range this.Query { + vy := that.Query[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &Query{} + } + if q == nil { + q = &Query{} + } + if !p.EqualVT(q) { + return false + } + } + } + if !this.QueryPlan.EqualVT(that.QueryPlan) { + return false + } + if 
!this.Options.EqualVT(that.Options) { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *InvokeRequest) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*InvokeRequest) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *QueryPlan) EqualVT(that *QueryPlan) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if len(this.Graph) != len(that.Graph) { + return false + } + for i, vx := range this.Graph { + vy := that.Graph[i] + if vx != vy { + return false + } + } + if len(this.Blocks) != len(that.Blocks) { + return false + } + for i, vx := range this.Blocks { + vy := that.Blocks[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &v1.BlockMeta{} + } + if q == nil { + q = &v1.BlockMeta{} + } + if equal, ok := interface{}(p).(interface{ EqualVT(*v1.BlockMeta) bool }); ok { + if !equal.EqualVT(q) { + return false + } + } else if !proto.Equal(p, q) { + return false + } + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *QueryPlan) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*QueryPlan) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *Query) EqualVT(that *Query) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.QueryType != that.QueryType { + return false + } + if !this.LabelNames.EqualVT(that.LabelNames) { + return false + } + if !this.LabelValues.EqualVT(that.LabelValues) { + return false + } + if !this.SeriesLabels.EqualVT(that.SeriesLabels) { + return false + } + if !this.TimeSeries.EqualVT(that.TimeSeries) { + return false + } + if !this.Tree.EqualVT(that.Tree) { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *Query) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*Query) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *InvokeResponse) EqualVT(that *InvokeResponse) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if len(this.Reports) != len(that.Reports) { + return false + } + for i, vx := range this.Reports { + vy := that.Reports[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &Report{} + } + if q == nil { + q = &Report{} + } + if !p.EqualVT(q) { + return false + } + } + } + if !this.Diagnostics.EqualVT(that.Diagnostics) { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *InvokeResponse) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*InvokeResponse) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *Diagnostics) EqualVT(that *Diagnostics) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *Diagnostics) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*Diagnostics) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *Report) EqualVT(that *Report) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.ReportType != that.ReportType { + return false + } + if !this.LabelNames.EqualVT(that.LabelNames) { + return false + } + if !this.LabelValues.EqualVT(that.LabelValues) { + return false + } + if 
!this.SeriesLabels.EqualVT(that.SeriesLabels) { + return false + } + if !this.TimeSeries.EqualVT(that.TimeSeries) { + return false + } + if !this.Tree.EqualVT(that.Tree) { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *Report) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*Report) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *LabelNamesQuery) EqualVT(that *LabelNamesQuery) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *LabelNamesQuery) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*LabelNamesQuery) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *LabelNamesReport) EqualVT(that *LabelNamesReport) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if !this.Query.EqualVT(that.Query) { + return false + } + if len(this.LabelNames) != len(that.LabelNames) { + return false + } + for i, vx := range this.LabelNames { + vy := that.LabelNames[i] + if vx != vy { + return false + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *LabelNamesReport) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*LabelNamesReport) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *LabelValuesQuery) EqualVT(that *LabelValuesQuery) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.LabelName != that.LabelName { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *LabelValuesQuery) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*LabelValuesQuery) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *LabelValuesReport) EqualVT(that *LabelValuesReport) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if !this.Query.EqualVT(that.Query) { + return false + } + if len(this.LabelValues) != len(that.LabelValues) { + return false + } + for i, vx := range this.LabelValues { + vy := that.LabelValues[i] + if vx != vy { + return false + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *LabelValuesReport) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*LabelValuesReport) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *SeriesLabelsQuery) EqualVT(that *SeriesLabelsQuery) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if len(this.LabelNames) != len(that.LabelNames) { + return false + } + for i, vx := range this.LabelNames { + vy := that.LabelNames[i] + if vx != vy { + return false + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *SeriesLabelsQuery) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*SeriesLabelsQuery) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *SeriesLabelsReport) EqualVT(that *SeriesLabelsReport) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if !this.Query.EqualVT(that.Query) { + return false + } + if len(this.SeriesLabels) != len(that.SeriesLabels) { + return false + } + for i, vx := range this.SeriesLabels { + vy := 
that.SeriesLabels[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &v11.Labels{} + } + if q == nil { + q = &v11.Labels{} + } + if equal, ok := interface{}(p).(interface{ EqualVT(*v11.Labels) bool }); ok { + if !equal.EqualVT(q) { + return false + } + } else if !proto.Equal(p, q) { + return false + } + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *SeriesLabelsReport) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*SeriesLabelsReport) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *TimeSeriesQuery) EqualVT(that *TimeSeriesQuery) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Step != that.Step { + return false + } + if len(this.GroupBy) != len(that.GroupBy) { + return false + } + for i, vx := range this.GroupBy { + vy := that.GroupBy[i] + if vx != vy { + return false + } + } + if p, q := this.Aggregation, that.Aggregation; (p == nil && q != nil) || (p != nil && (q == nil || *p != *q)) { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *TimeSeriesQuery) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*TimeSeriesQuery) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *TimeSeriesReport) EqualVT(that *TimeSeriesReport) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if !this.Query.EqualVT(that.Query) { + return false + } + if len(this.TimeSeries) != len(that.TimeSeries) { + return false + } + for i, vx := range this.TimeSeries { + vy := that.TimeSeries[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &v11.Series{} + } + if q == nil { + q = &v11.Series{} + } + if equal, ok := interface{}(p).(interface{ EqualVT(*v11.Series) bool }); ok { + if !equal.EqualVT(q) { + return false + } + } else if !proto.Equal(p, q) { + return false + } + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *TimeSeriesReport) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*TimeSeriesReport) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *TreeQuery) EqualVT(that *TreeQuery) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.MaxNodes != that.MaxNodes { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *TreeQuery) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*TreeQuery) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *TreeReport) EqualVT(that *TreeReport) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if !this.Query.EqualVT(that.Query) { + return false + } + if string(this.Tree) != string(that.Tree) { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *TreeReport) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*TreeReport) + if !ok { + return false + } + return this.EqualVT(that) +} + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// QueryBackendServiceClient is the client API for QueryBackendService service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type QueryBackendServiceClient interface { + Invoke(ctx context.Context, in *InvokeRequest, opts ...grpc.CallOption) (*InvokeResponse, error) +} + +type queryBackendServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewQueryBackendServiceClient(cc grpc.ClientConnInterface) QueryBackendServiceClient { + return &queryBackendServiceClient{cc} +} + +func (c *queryBackendServiceClient) Invoke(ctx context.Context, in *InvokeRequest, opts ...grpc.CallOption) (*InvokeResponse, error) { + out := new(InvokeResponse) + err := c.cc.Invoke(ctx, "/querybackend.v1.QueryBackendService/Invoke", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryBackendServiceServer is the server API for QueryBackendService service. +// All implementations must embed UnimplementedQueryBackendServiceServer +// for forward compatibility +type QueryBackendServiceServer interface { + Invoke(context.Context, *InvokeRequest) (*InvokeResponse, error) + mustEmbedUnimplementedQueryBackendServiceServer() +} + +// UnimplementedQueryBackendServiceServer must be embedded to have forward compatible implementations. +type UnimplementedQueryBackendServiceServer struct { +} + +func (UnimplementedQueryBackendServiceServer) Invoke(context.Context, *InvokeRequest) (*InvokeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Invoke not implemented") +} +func (UnimplementedQueryBackendServiceServer) mustEmbedUnimplementedQueryBackendServiceServer() {} + +// UnsafeQueryBackendServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to QueryBackendServiceServer will +// result in compilation errors. +type UnsafeQueryBackendServiceServer interface { + mustEmbedUnimplementedQueryBackendServiceServer() +} + +func RegisterQueryBackendServiceServer(s grpc.ServiceRegistrar, srv QueryBackendServiceServer) { + s.RegisterService(&QueryBackendService_ServiceDesc, srv) +} + +func _QueryBackendService_Invoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InvokeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryBackendServiceServer).Invoke(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/querybackend.v1.QueryBackendService/Invoke", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryBackendServiceServer).Invoke(ctx, req.(*InvokeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// QueryBackendService_ServiceDesc is the grpc.ServiceDesc for QueryBackendService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var QueryBackendService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "querybackend.v1.QueryBackendService", + HandlerType: (*QueryBackendServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Invoke", + Handler: _QueryBackendService_Invoke_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "querybackend/v1/querybackend.proto", +} + +func (m *InvokeOptions) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InvokeOptions) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *InvokeOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *InvokeRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InvokeRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *InvokeRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Options != nil { + size, err := m.Options.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.QueryPlan != nil { + size, err := m.QueryPlan.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.Query) > 0 { + for iNdEx := len(m.Query) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Query[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.LabelSelector) > 0 { + i -= len(m.LabelSelector) + copy(dAtA[i:], m.LabelSelector) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LabelSelector))) + i-- + dAtA[i] = 0x22 + } + if m.EndTime != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.EndTime)) + i-- + dAtA[i] = 0x18 + } + if m.StartTime != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.StartTime)) + i-- + dAtA[i] = 0x10 + } + if len(m.Tenant) > 0 { + for iNdEx := len(m.Tenant) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tenant[iNdEx]) + copy(dAtA[i:], m.Tenant[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Tenant[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryPlan) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryPlan) MarshalToVT(dAtA []byte) 
(int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *QueryPlan) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Blocks) > 0 { + for iNdEx := len(m.Blocks) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Blocks[iNdEx]).(interface { + MarshalToSizedBufferVT([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Blocks[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Graph) > 0 { + var pksize2 int + for _, num := range m.Graph { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num := range m.Graph { + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Query) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Query) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Query) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Tree != nil { + size, err := m.Tree.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.TimeSeries != nil { + size, err := m.TimeSeries.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.SeriesLabels != nil { + size, err := m.SeriesLabels.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.LabelValues != nil { + size, err := m.LabelValues.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.LabelNames != nil { + size, err := m.LabelNames.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.QueryType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.QueryType)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *InvokeResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InvokeResponse) MarshalToVT(dAtA 
[]byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *InvokeResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Diagnostics != nil { + size, err := m.Diagnostics.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Reports) > 0 { + for iNdEx := len(m.Reports) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Reports[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Diagnostics) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Diagnostics) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Diagnostics) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *Report) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Report) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Report) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Tree != nil { + size, err := m.Tree.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.TimeSeries != nil { + size, err := m.TimeSeries.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.SeriesLabels != nil { + size, err := m.SeriesLabels.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.LabelValues != nil { + size, err := m.LabelValues.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.LabelNames != nil { + size, err := m.LabelNames.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.ReportType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ReportType)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *LabelNamesQuery) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil 
+ } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelNamesQuery) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *LabelNamesQuery) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *LabelNamesReport) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelNamesReport) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *LabelNamesReport) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.LabelNames) > 0 { + for iNdEx := len(m.LabelNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.LabelNames[iNdEx]) + copy(dAtA[i:], m.LabelNames[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LabelNames[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.Query != nil { + size, err := m.Query.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LabelValuesQuery) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelValuesQuery) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *LabelValuesQuery) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.LabelName) > 0 { + i -= len(m.LabelName) + copy(dAtA[i:], m.LabelName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LabelName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LabelValuesReport) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelValuesReport) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *LabelValuesReport) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.LabelValues) > 0 { + for iNdEx := len(m.LabelValues) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.LabelValues[iNdEx]) + copy(dAtA[i:], m.LabelValues[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(len(m.LabelValues[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.Query != nil { + size, err := m.Query.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SeriesLabelsQuery) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeriesLabelsQuery) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SeriesLabelsQuery) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.LabelNames) > 0 { + for iNdEx := len(m.LabelNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.LabelNames[iNdEx]) + copy(dAtA[i:], m.LabelNames[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LabelNames[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SeriesLabelsReport) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeriesLabelsReport) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SeriesLabelsReport) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.SeriesLabels) > 0 { + for iNdEx := len(m.SeriesLabels) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.SeriesLabels[iNdEx]).(interface { + MarshalToSizedBufferVT([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.SeriesLabels[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Query != nil { + size, err := m.Query.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TimeSeriesQuery) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeSeriesQuery) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *TimeSeriesQuery) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Aggregation != nil { + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(*m.Aggregation)) + i-- + dAtA[i] = 0x18 + } + if len(m.GroupBy) > 0 { + for iNdEx := len(m.GroupBy) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.GroupBy[iNdEx]) + copy(dAtA[i:], m.GroupBy[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.GroupBy[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.Step != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Step)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *TimeSeriesReport) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeSeriesReport) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *TimeSeriesReport) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TimeSeries) > 0 { + for iNdEx := len(m.TimeSeries) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.TimeSeries[iNdEx]).(interface { + MarshalToSizedBufferVT([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.TimeSeries[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Query != nil { + size, err := m.Query.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TreeQuery) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TreeQuery) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *TreeQuery) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxNodes != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MaxNodes)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TreeReport) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TreeReport) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *TreeReport) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Tree) > 0 { + i -= len(m.Tree) + copy(dAtA[i:], m.Tree) + i = protohelpers.EncodeVarint(dAtA, i, 
uint64(len(m.Tree))) + i-- + dAtA[i] = 0x12 + } + if m.Query != nil { + size, err := m.Query.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *InvokeOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *InvokeRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Tenant) > 0 { + for _, s := range m.Tenant { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.StartTime != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.StartTime)) + } + if m.EndTime != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.EndTime)) + } + l = len(m.LabelSelector) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Query) > 0 { + for _, e := range m.Query { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.QueryPlan != nil { + l = m.QueryPlan.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Options != nil { + l = m.Options.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *QueryPlan) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Graph) > 0 { + l = 0 + for _, e := range m.Graph { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if len(m.Blocks) > 0 { + for _, e := range m.Blocks { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Query) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.QueryType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.QueryType)) + } + if m.LabelNames != nil { + l = m.LabelNames.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LabelValues != nil { + l = m.LabelValues.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.SeriesLabels != nil { + l = m.SeriesLabels.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TimeSeries != nil { + l = m.TimeSeries.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Tree != nil { + l = m.Tree.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *InvokeResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Reports) > 0 { + for _, e := range m.Reports { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Diagnostics != nil { + l = m.Diagnostics.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Diagnostics) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *Report) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ReportType != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ReportType)) + } + if m.LabelNames != nil { + l = m.LabelNames.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.LabelValues != nil { + l = m.LabelValues.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if 
m.SeriesLabels != nil { + l = m.SeriesLabels.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TimeSeries != nil { + l = m.TimeSeries.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Tree != nil { + l = m.Tree.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LabelNamesQuery) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *LabelNamesReport) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Query != nil { + l = m.Query.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.LabelNames) > 0 { + for _, s := range m.LabelNames { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *LabelValuesQuery) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LabelName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LabelValuesReport) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Query != nil { + l = m.Query.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.LabelValues) > 0 { + for _, s := range m.LabelValues { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *SeriesLabelsQuery) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.LabelNames) > 0 { + for _, s := range m.LabelNames { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *SeriesLabelsReport) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Query != nil { + l = m.Query.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.SeriesLabels) > 0 { + for _, e := range m.SeriesLabels { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *TimeSeriesQuery) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Step != 0 { + n += 9 + } + if len(m.GroupBy) > 0 { + for _, s := range m.GroupBy { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Aggregation != nil { + n += 1 + protohelpers.SizeOfVarint(uint64(*m.Aggregation)) + } + n += len(m.unknownFields) + return n +} + +func (m *TimeSeriesReport) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Query != nil { + l = m.Query.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.TimeSeries) > 0 { + for _, e := range m.TimeSeries { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *TreeQuery) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxNodes != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MaxNodes)) + } + n += len(m.unknownFields) + return n +} + +func (m *TreeReport) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Query != nil { + l = m.Query.SizeVT() + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Tree) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *InvokeOptions) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InvokeOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InvokeOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InvokeRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InvokeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InvokeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tenant", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tenant = append(m.Tenant, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + m.StartTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTime", wireType) + } + m.EndTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
LabelSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Query = append(m.Query, &Query{}) + if err := m.Query[len(m.Query)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QueryPlan", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.QueryPlan == nil { + m.QueryPlan = &QueryPlan{} + } + if err := m.QueryPlan.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &InvokeOptions{} + } + if err := m.Options.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryPlan) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryPlan: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryPlan: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Graph = append(m.Graph, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Graph) == 0 { + m.Graph = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Graph = append(m.Graph, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Graph", wireType) + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Blocks = append(m.Blocks, &v1.BlockMeta{}) + if unmarshal, ok := interface{}(m.Blocks[len(m.Blocks)-1]).(interface { + UnmarshalVT([]byte) error + }); ok { + if err := unmarshal.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + if err := proto.Unmarshal(dAtA[iNdEx:postIndex], m.Blocks[len(m.Blocks)-1]); err != nil { + return err + } + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, 
dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Query) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Query: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Query: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueryType", wireType) + } + m.QueryType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.QueryType |= QueryType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelNames", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelNames == nil { + m.LabelNames = &LabelNamesQuery{} + } + if err := m.LabelNames.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelValues", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelValues == nil { + m.LabelValues = &LabelValuesQuery{} + } + if err := m.LabelValues.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SeriesLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SeriesLabels == nil { + m.SeriesLabels = &SeriesLabelsQuery{} + } + if err := m.SeriesLabels.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field TimeSeries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TimeSeries == nil { + m.TimeSeries = &TimeSeriesQuery{} + } + if err := m.TimeSeries.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tree", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tree == nil { + m.Tree = &TreeQuery{} + } + if err := m.Tree.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InvokeResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InvokeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InvokeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reports = append(m.Reports, &Report{}) + if err := m.Reports[len(m.Reports)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Diagnostics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { 
+ return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Diagnostics == nil { + m.Diagnostics = &Diagnostics{} + } + if err := m.Diagnostics.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Diagnostics) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Diagnostics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Diagnostics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Report) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Report: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Report: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReportType", wireType) + } + m.ReportType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReportType |= ReportType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelNames", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelNames == nil { + m.LabelNames = &LabelNamesReport{} + } + if err := m.LabelNames.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelValues", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelValues == nil { + m.LabelValues = &LabelValuesReport{} + } + if err := m.LabelValues.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SeriesLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SeriesLabels == nil { + m.SeriesLabels = &SeriesLabelsReport{} + } + if err := m.SeriesLabels.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
TimeSeries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TimeSeries == nil { + m.TimeSeries = &TimeSeriesReport{} + } + if err := m.TimeSeries.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tree", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tree == nil { + m.Tree = &TreeReport{} + } + if err := m.Tree.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelNamesQuery) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelNamesQuery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelNamesQuery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelNamesReport) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelNamesReport: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelNamesReport: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Query == nil { + m.Query = &LabelNamesQuery{} + } + if err := m.Query.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelNames = append(m.LabelNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelValuesQuery) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelValuesQuery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelValuesQuery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelValuesReport) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelValuesReport: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelValuesReport: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Query == nil { + m.Query = &LabelValuesQuery{} + } + if err := m.Query.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelValues", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelValues = append(m.LabelValues, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeriesLabelsQuery) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeriesLabelsQuery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeriesLabelsQuery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelNames = append(m.LabelNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeriesLabelsReport) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeriesLabelsReport: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeriesLabelsReport: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Query == nil { + m.Query = &SeriesLabelsQuery{} + } + if err := m.Query.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SeriesLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SeriesLabels = append(m.SeriesLabels, &v11.Labels{}) + if unmarshal, ok := interface{}(m.SeriesLabels[len(m.SeriesLabels)-1]).(interface { + UnmarshalVT([]byte) error + }); ok { + if err := unmarshal.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + if err := proto.Unmarshal(dAtA[iNdEx:postIndex], m.SeriesLabels[len(m.SeriesLabels)-1]); err != nil { + return err + } + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeSeriesQuery) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeSeriesQuery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeSeriesQuery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Step", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Step = float64(math.Float64frombits(v)) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupBy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GroupBy = append(m.GroupBy, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Aggregation", wireType) + } + var v v11.TimeSeriesAggregationType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= v11.TimeSeriesAggregationType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Aggregation = &v + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeSeriesReport) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeSeriesReport: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeSeriesReport: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Query == nil { + m.Query = &TimeSeriesQuery{} + } + if err := m.Query.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeSeries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TimeSeries = append(m.TimeSeries, &v11.Series{}) + if unmarshal, ok := interface{}(m.TimeSeries[len(m.TimeSeries)-1]).(interface { + UnmarshalVT([]byte) error + }); ok { + if err := unmarshal.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + if err := proto.Unmarshal(dAtA[iNdEx:postIndex], m.TimeSeries[len(m.TimeSeries)-1]); err != nil { + return err + } + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TreeQuery) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TreeQuery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TreeQuery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxNodes", wireType) + } + m.MaxNodes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxNodes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TreeReport) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TreeReport: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TreeReport: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Query == nil { + m.Query = &TreeQuery{} + } + if err := m.Query.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tree", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.Tree = append(m.Tree[:0], dAtA[iNdEx:postIndex]...) + if m.Tree == nil { + m.Tree = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/api/gen/proto/go/querybackend/v1/querybackendv1connect/querybackend.connect.go b/api/gen/proto/go/querybackend/v1/querybackendv1connect/querybackend.connect.go new file mode 100644 index 0000000000..637f1d3f58 --- /dev/null +++ b/api/gen/proto/go/querybackend/v1/querybackendv1connect/querybackend.connect.go @@ -0,0 +1,114 @@ +// Code generated by protoc-gen-connect-go. DO NOT EDIT. +// +// Source: querybackend/v1/querybackend.proto + +package querybackendv1connect + +import ( + connect "connectrpc.com/connect" + context "context" + errors "errors" + v1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" + http "net/http" + strings "strings" +) + +// This is a compile-time assertion to ensure that this generated file and the connect package are +// compatible. If you get a compiler error that this constant is not defined, this code was +// generated with a version of connect newer than the one compiled into your binary. You can fix the +// problem by either regenerating this code with an older version of connect or updating the connect +// version compiled into your binary. +const _ = connect.IsAtLeastVersion1_13_0 + +const ( + // QueryBackendServiceName is the fully-qualified name of the QueryBackendService service. + QueryBackendServiceName = "querybackend.v1.QueryBackendService" +) + +// These constants are the fully-qualified names of the RPCs defined in this package. They're +// exposed at runtime as Spec.Procedure and as the final two segments of the HTTP route. +// +// Note that these are different from the fully-qualified method names used by +// google.golang.org/protobuf/reflect/protoreflect. To convert from these constants to +// reflection-formatted method names, remove the leading slash and convert the remaining slash to a +// period. +const ( + // QueryBackendServiceInvokeProcedure is the fully-qualified name of the QueryBackendService's + // Invoke RPC. + QueryBackendServiceInvokeProcedure = "/querybackend.v1.QueryBackendService/Invoke" +) + +// These variables are the protoreflect.Descriptor objects for the RPCs defined in this package. +var ( + queryBackendServiceServiceDescriptor = v1.File_querybackend_v1_querybackend_proto.Services().ByName("QueryBackendService") + queryBackendServiceInvokeMethodDescriptor = queryBackendServiceServiceDescriptor.Methods().ByName("Invoke") +) + +// QueryBackendServiceClient is a client for the querybackend.v1.QueryBackendService service. +type QueryBackendServiceClient interface { + Invoke(context.Context, *connect.Request[v1.InvokeRequest]) (*connect.Response[v1.InvokeResponse], error) +} + +// NewQueryBackendServiceClient constructs a client for the querybackend.v1.QueryBackendService +// service. By default, it uses the Connect protocol with the binary Protobuf Codec, asks for +// gzipped responses, and sends uncompressed requests. To use the gRPC or gRPC-Web protocols, supply +// the connect.WithGRPC() or connect.WithGRPCWeb() options. 
+// +// The URL supplied here should be the base URL for the Connect or gRPC server (for example, +// http://api.acme.com or https://acme.com/grpc). +func NewQueryBackendServiceClient(httpClient connect.HTTPClient, baseURL string, opts ...connect.ClientOption) QueryBackendServiceClient { + baseURL = strings.TrimRight(baseURL, "/") + return &queryBackendServiceClient{ + invoke: connect.NewClient[v1.InvokeRequest, v1.InvokeResponse]( + httpClient, + baseURL+QueryBackendServiceInvokeProcedure, + connect.WithSchema(queryBackendServiceInvokeMethodDescriptor), + connect.WithClientOptions(opts...), + ), + } +} + +// queryBackendServiceClient implements QueryBackendServiceClient. +type queryBackendServiceClient struct { + invoke *connect.Client[v1.InvokeRequest, v1.InvokeResponse] +} + +// Invoke calls querybackend.v1.QueryBackendService.Invoke. +func (c *queryBackendServiceClient) Invoke(ctx context.Context, req *connect.Request[v1.InvokeRequest]) (*connect.Response[v1.InvokeResponse], error) { + return c.invoke.CallUnary(ctx, req) +} + +// QueryBackendServiceHandler is an implementation of the querybackend.v1.QueryBackendService +// service. +type QueryBackendServiceHandler interface { + Invoke(context.Context, *connect.Request[v1.InvokeRequest]) (*connect.Response[v1.InvokeResponse], error) +} + +// NewQueryBackendServiceHandler builds an HTTP handler from the service implementation. It returns +// the path on which to mount the handler and the handler itself. +// +// By default, handlers support the Connect, gRPC, and gRPC-Web protocols with the binary Protobuf +// and JSON codecs. They also support gzip compression. +func NewQueryBackendServiceHandler(svc QueryBackendServiceHandler, opts ...connect.HandlerOption) (string, http.Handler) { + queryBackendServiceInvokeHandler := connect.NewUnaryHandler( + QueryBackendServiceInvokeProcedure, + svc.Invoke, + connect.WithSchema(queryBackendServiceInvokeMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + return "/querybackend.v1.QueryBackendService/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case QueryBackendServiceInvokeProcedure: + queryBackendServiceInvokeHandler.ServeHTTP(w, r) + default: + http.NotFound(w, r) + } + }) +} + +// UnimplementedQueryBackendServiceHandler returns CodeUnimplemented from all methods. +type UnimplementedQueryBackendServiceHandler struct{} + +func (UnimplementedQueryBackendServiceHandler) Invoke(context.Context, *connect.Request[v1.InvokeRequest]) (*connect.Response[v1.InvokeResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("querybackend.v1.QueryBackendService.Invoke is not implemented")) +} diff --git a/api/gen/proto/go/querybackend/v1/querybackendv1connect/querybackend.connect.mux.go b/api/gen/proto/go/querybackend/v1/querybackendv1connect/querybackend.connect.mux.go new file mode 100644 index 0000000000..2a58ad96eb --- /dev/null +++ b/api/gen/proto/go/querybackend/v1/querybackendv1connect/querybackend.connect.mux.go @@ -0,0 +1,27 @@ +// Code generated by protoc-gen-connect-go-mux. DO NOT EDIT. +// +// Source: querybackend/v1/querybackend.proto + +package querybackendv1connect + +import ( + connect "connectrpc.com/connect" + mux "github.com/gorilla/mux" +) + +// This is a compile-time assertion to ensure that this generated file and the connect package are +// compatible. 
If you get a compiler error that this constant is not defined, this code was +// generated with a version of connect newer than the one compiled into your binary. You can fix the +// problem by either regenerating this code with an older version of connect or updating the connect +// version compiled into your binary. +const _ = connect.IsAtLeastVersion0_1_0 + +// RegisterQueryBackendServiceHandler register an HTTP handler to a mux.Router from the service +// implementation. +func RegisterQueryBackendServiceHandler(mux *mux.Router, svc QueryBackendServiceHandler, opts ...connect.HandlerOption) { + mux.Handle("/querybackend.v1.QueryBackendService/Invoke", connect.NewUnaryHandler( + "/querybackend.v1.QueryBackendService/Invoke", + svc.Invoke, + opts..., + )) +} diff --git a/api/metastore/v1/metastore.proto b/api/metastore/v1/metastore.proto new file mode 100644 index 0000000000..db7cbf7276 --- /dev/null +++ b/api/metastore/v1/metastore.proto @@ -0,0 +1,71 @@ +syntax = "proto3"; + +package metastore.v1; + +service MetastoreService { + rpc AddBlock(AddBlockRequest) returns (AddBlockResponse) {} + rpc ListBlocksForQuery(ListBlocksForQueryRequest) returns (ListBlocksForQueryResponse) {} + rpc ReadIndex(ReadIndexRequest) returns (ReadIndexResponse) {} +} + +message AddBlockRequest { + BlockMeta block = 1; +} + +message AddBlockResponse {} + +message BlockMeta { + uint64 format_version = 1; + string id = 2; + int64 min_time = 3; + int64 max_time = 4; + uint32 shard = 5; + uint32 compaction_level = 6; + // Optional. Empty if compaction level is 0. + string tenant_id = 7; + // TODO(kolesnikovae): Partitions with labels? + repeated TenantService tenant_services = 8; + uint64 size = 9; +} + +// TenantService object points to the offset in the block at which +// the tenant service data is located. +message TenantService { + string tenant_id = 1; + string name = 2; + int64 min_time = 3; + int64 max_time = 4; + + // Table of contents lists data sections within the tenant + // service region. The offsets are absolute. + // + // The interpretation of the table of contents is specific + // to the metadata format version. By default, the sections are: + // - 0: profiles.parquet + // - 1: index.tsdb + // - 2: symbols.symdb + repeated uint64 table_of_contents = 5; + // Size of the section in bytes. + uint64 size = 6; + // Profile types present in the tenant service data. 
+ repeated string profile_types = 7; +} + +message ListBlocksForQueryRequest { + repeated string tenant_id = 1; + int64 start_time = 2; + int64 end_time = 3; + string query = 4; +} + +message ListBlocksForQueryResponse { + repeated BlockMeta blocks = 1; +} + +message ReadIndexRequest { + string debug_request_id = 1; // for debug logging, // todo delete +} + +message ReadIndexResponse { + uint64 read_index = 1; +} diff --git a/api/openapiv2/gen/phlare.swagger.json b/api/openapiv2/gen/phlare.swagger.json index 5e58aecba9..5d77fea064 100644 --- a/api/openapiv2/gen/phlare.swagger.json +++ b/api/openapiv2/gen/phlare.swagger.json @@ -8,6 +8,12 @@ { "name": "AdHocProfileService" }, + { + "name": "MetastoreService" + }, + { + "name": "CompactionPlanner" + }, { "name": "PusherService" }, @@ -17,6 +23,9 @@ { "name": "QuerierService" }, + { + "name": "QueryBackendService" + }, { "name": "SettingsService" }, @@ -451,6 +460,9 @@ } } }, + "v1AddBlockResponse": { + "type": "object" + }, "v1AnalyzeQueryResponse": { "type": "object", "properties": { @@ -531,6 +543,50 @@ } } }, + "v1BlockMeta": { + "type": "object", + "properties": { + "formatVersion": { + "type": "string", + "format": "uint64" + }, + "id": { + "type": "string" + }, + "minTime": { + "type": "string", + "format": "int64" + }, + "maxTime": { + "type": "string", + "format": "int64" + }, + "shard": { + "type": "integer", + "format": "int64" + }, + "compactionLevel": { + "type": "integer", + "format": "int64" + }, + "tenantId": { + "type": "string", + "description": "Optional. Empty if compaction level is 0." + }, + "tenantServices": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/v1TenantService" + }, + "title": "TODO(kolesnikovae): Partitions with labels?" + }, + "size": { + "type": "string", + "format": "uint64" + } + } + }, "v1BlockMetadataResponse": { "type": "object", "properties": { @@ -586,6 +642,114 @@ } } }, + "v1CompactionJob": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Unique name of the job." + }, + "options": { + "$ref": "#/definitions/v1CompactionOptions" + }, + "blocks": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/v1BlockMeta" + }, + "description": "List of the input blocks." + }, + "status": { + "$ref": "#/definitions/v1CompactionJobStatus" + }, + "raftLogIndex": { + "type": "string", + "format": "uint64", + "description": "Fencing token." + }, + "shard": { + "type": "integer", + "format": "int64", + "description": "Shard the blocks belong to." + }, + "tenantId": { + "type": "string", + "description": "Optional, empty for compaction level 0." + }, + "compactionLevel": { + "type": "integer", + "format": "int64" + } + }, + "description": "One compaction job may result in multiple output blocks." + }, + "v1CompactionJobStatus": { + "type": "object", + "properties": { + "jobName": { + "type": "string" + }, + "status": { + "$ref": "#/definitions/v1CompactionStatus", + "description": "Status update allows the planner to keep\ntrack of the job ownership and compaction\nprogress:\n- If the job status is other than IN_PROGRESS,\n the ownership of the job is revoked.\n- FAILURE must only be sent if the failure is\n persistent and the compaction can't be accomplished.\n- completed_job must be empty if the status is\n other than SUCCESS, and vice-versa.\n- UNSPECIFIED must be sent if the worker rejects\n or cancels the compaction job.\n\nPartial results/status is not allowed." 
+ }, + "completedJob": { + "$ref": "#/definitions/v1CompletedJob" + }, + "raftLogIndex": { + "type": "string", + "format": "uint64", + "description": "Fencing token." + }, + "shard": { + "type": "integer", + "format": "int64", + "description": "Shard the blocks belong to." + }, + "tenantId": { + "type": "string", + "description": "Optional, empty for compaction level 0." + } + } + }, + "v1CompactionOptions": { + "type": "object", + "properties": { + "statusUpdateIntervalSeconds": { + "type": "string", + "format": "uint64", + "description": "How often the compaction worker should update\nthe job status. If overdue, the job ownership\nis revoked." + } + }, + "description": "Compaction planner should instruct the compactor\n worker how to compact the blocks:\n - Limits and tenant overrides.\n - Feature flags." + }, + "v1CompactionStatus": { + "type": "string", + "enum": [ + "COMPACTION_STATUS_UNSPECIFIED", + "COMPACTION_STATUS_IN_PROGRESS", + "COMPACTION_STATUS_SUCCESS", + "COMPACTION_STATUS_FAILURE" + ], + "default": "COMPACTION_STATUS_UNSPECIFIED" + }, + "v1CompletedJob": { + "type": "object", + "properties": { + "blocks": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/v1BlockMeta" + } + } + } + }, + "v1Diagnostics": { + "type": "object", + "description": "Diagnostic messages, events, statistics, analytics, etc." + }, "v1DiffResponse": { "type": "object", "properties": { @@ -758,6 +922,19 @@ } } }, + "v1GetCompactionResponse": { + "type": "object", + "properties": { + "compactionJobs": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/v1CompactionJob" + }, + "title": "A list of all compaction jobs" + } + } + }, "v1GetFileResponse": { "type": "object", "properties": { @@ -849,6 +1026,42 @@ }, "title": "Hints are used to propagate information about querying" }, + "v1InvokeOptions": { + "type": "object", + "description": "Query workers might not have access to the tenant\n overrides, therefore all the necessary options should\n be listed in the request explicitly." 
+ }, + "v1InvokeResponse": { + "type": "object", + "properties": { + "reports": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/v1Report" + } + }, + "diagnostics": { + "$ref": "#/definitions/v1Diagnostics" + } + } + }, + "v1LabelNamesQuery": { + "type": "object" + }, + "v1LabelNamesReport": { + "type": "object", + "properties": { + "query": { + "$ref": "#/definitions/v1LabelNamesQuery" + }, + "labelNames": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, "v1LabelNamesResponse": { "type": "object", "properties": { @@ -871,6 +1084,28 @@ } } }, + "v1LabelValuesQuery": { + "type": "object", + "properties": { + "labelName": { + "type": "string" + } + } + }, + "v1LabelValuesReport": { + "type": "object", + "properties": { + "query": { + "$ref": "#/definitions/v1LabelValuesQuery" + }, + "labelValues": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, "v1LabelValuesResponse": { "type": "object", "properties": { @@ -922,6 +1157,18 @@ } } }, + "v1ListBlocksForQueryResponse": { + "type": "object", + "properties": { + "blocks": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/v1BlockMeta" + } + } + } + }, "v1Mapping": { "type": "object", "properties": { @@ -1077,6 +1324,18 @@ } } }, + "v1PollCompactionJobsResponse": { + "type": "object", + "properties": { + "compactionJobs": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/v1CompactionJob" + } + } + } + }, "v1ProfileFormat": { "type": "string", "enum": [ @@ -1139,6 +1398,31 @@ "v1PushResponse": { "type": "object" }, + "v1Query": { + "type": "object", + "properties": { + "queryType": { + "$ref": "#/definitions/v1QueryType" + }, + "labelNames": { + "$ref": "#/definitions/v1LabelNamesQuery", + "description": "Exactly one of the following fields should be set,\ndepending on the query type." + }, + "labelValues": { + "$ref": "#/definitions/v1LabelValuesQuery" + }, + "seriesLabels": { + "$ref": "#/definitions/v1SeriesLabelsQuery" + }, + "timeSeries": { + "$ref": "#/definitions/v1TimeSeriesQuery" + }, + "tree": { + "$ref": "#/definitions/v1TreeQuery", + "description": "pprof\n function_details\n call_graph\n top_table\n ..." + } + } + }, "v1QueryImpact": { "type": "object", "properties": { @@ -1155,6 +1439,28 @@ } } }, + "v1QueryPlan": { + "type": "object", + "properties": { + "graph": { + "type": "array", + "items": { + "type": "integer", + "format": "int64" + }, + "description": "Each node is encoded with 3 values:\n - node type: 0 - read, 1 - merge;\n - range offset;\n - range length." + }, + "blocks": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/v1BlockMeta" + }, + "description": "The blocks matching the query.\n\nTODO: Use opaque byte array to avoid unnecessary\n proto encoding/decoding overhead in transient nodes.\n Graph nodes reference ranges, thus each range could\n be a blob of bytes:\n repeated bytes block_ranges = 2;" + } + }, + "description": "Query plan is represented by a DAG, where each node\nmight be either \"merge\" or \"read\" (leaves). Each node\nreferences a range: merge nodes refer to other nodes,\nwhile read nodes refer to the blocks." 
+ }, "v1QueryScope": { "type": "object", "properties": { @@ -1197,6 +1503,18 @@ } } }, + "v1QueryType": { + "type": "string", + "enum": [ + "QUERY_UNSPECIFIED", + "QUERY_LABEL_NAMES", + "QUERY_LABEL_VALUES", + "QUERY_SERIES_LABELS", + "QUERY_TIME_SERIES", + "QUERY_TREE" + ], + "default": "QUERY_UNSPECIFIED" + }, "v1RawProfileSeries": { "type": "object", "properties": { @@ -1234,6 +1552,51 @@ }, "title": "RawSample is the set of bytes that correspond to a pprof profile" }, + "v1ReadIndexResponse": { + "type": "object", + "properties": { + "readIndex": { + "type": "string", + "format": "uint64" + } + } + }, + "v1Report": { + "type": "object", + "properties": { + "reportType": { + "$ref": "#/definitions/v1ReportType" + }, + "labelNames": { + "$ref": "#/definitions/v1LabelNamesReport", + "description": "Exactly one of the following fields should be set,\ndepending on the report type." + }, + "labelValues": { + "$ref": "#/definitions/v1LabelValuesReport" + }, + "seriesLabels": { + "$ref": "#/definitions/v1SeriesLabelsReport" + }, + "timeSeries": { + "$ref": "#/definitions/v1TimeSeriesReport" + }, + "tree": { + "$ref": "#/definitions/v1TreeReport" + } + } + }, + "v1ReportType": { + "type": "string", + "enum": [ + "REPORT_UNSPECIFIED", + "REPORT_LABEL_NAMES", + "REPORT_LABEL_VALUES", + "REPORT_SERIES_LABELS", + "REPORT_TIME_SERIES", + "REPORT_TREE" + ], + "default": "REPORT_UNSPECIFIED" + }, "v1Sample": { "type": "object", "properties": { @@ -1412,6 +1775,32 @@ } } }, + "v1SeriesLabelsQuery": { + "type": "object", + "properties": { + "labelNames": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "v1SeriesLabelsReport": { + "type": "object", + "properties": { + "query": { + "$ref": "#/definitions/v1SeriesLabelsQuery" + }, + "seriesLabels": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/v1Labels" + } + } + } + }, "v1SeriesProfile": { "type": "object", "properties": { @@ -1493,6 +1882,46 @@ ], "default": "MERGE_FORMAT_UNSPECIFIED" }, + "v1TenantService": { + "type": "object", + "properties": { + "tenantId": { + "type": "string" + }, + "name": { + "type": "string" + }, + "minTime": { + "type": "string", + "format": "int64" + }, + "maxTime": { + "type": "string", + "format": "int64" + }, + "tableOfContents": { + "type": "array", + "items": { + "type": "string", + "format": "uint64" + }, + "description": "Table of contents lists data sections within the tenant\nservice region. The offsets are absolute.\n\nThe interpretation of the table of contents is specific\nto the metadata format version. By default, the sections are:\n - 0: profiles.parquet\n - 1: index.tsdb\n - 2: symbols.symdb" + }, + "size": { + "type": "string", + "format": "uint64", + "description": "Size of the section in bytes." + }, + "profileTypes": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Profile types present in the tenant service data." + } + }, + "description": "TenantService object points to the offset in the block at which\nthe tenant service data is located." 
+ }, "v1TimeSeriesAggregationType": { "type": "string", "enum": [ @@ -1501,6 +1930,60 @@ ], "default": "TIME_SERIES_AGGREGATION_TYPE_SUM" }, + "v1TimeSeriesQuery": { + "type": "object", + "properties": { + "step": { + "type": "number", + "format": "double" + }, + "groupBy": { + "type": "array", + "items": { + "type": "string" + } + }, + "aggregation": { + "$ref": "#/definitions/v1TimeSeriesAggregationType" + } + } + }, + "v1TimeSeriesReport": { + "type": "object", + "properties": { + "query": { + "$ref": "#/definitions/v1TimeSeriesQuery" + }, + "timeSeries": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/v1Series" + } + } + } + }, + "v1TreeQuery": { + "type": "object", + "properties": { + "maxNodes": { + "type": "string", + "format": "int64" + } + } + }, + "v1TreeReport": { + "type": "object", + "properties": { + "query": { + "$ref": "#/definitions/v1TreeQuery" + }, + "tree": { + "type": "string", + "format": "byte" + } + } + }, "v1ValueType": { "type": "object", "properties": { diff --git a/api/querybackend/v1/querybackend.proto b/api/querybackend/v1/querybackend.proto new file mode 100644 index 0000000000..c368f7a633 --- /dev/null +++ b/api/querybackend/v1/querybackend.proto @@ -0,0 +1,145 @@ +syntax = "proto3"; + +package querybackend.v1; + +import "google/v1/profile.proto"; +import "metastore/v1/metastore.proto"; +import "types/v1/types.proto"; + +service QueryBackendService { + rpc Invoke(InvokeRequest) returns (InvokeResponse) {} +} + +message InvokeOptions { + // Query workers might not have access to the tenant + // overrides, therefore all the necessary options should + // be listed in the request explicitly. +} + +message InvokeRequest { + repeated string tenant = 1; + int64 start_time = 2; + int64 end_time = 3; + string label_selector = 4; + repeated Query query = 5; + QueryPlan query_plan = 6; + InvokeOptions options = 7; +} + +// Query plan is represented by a DAG, where each node +// might be either "merge" or "read" (leaves). Each node +// references a range: merge nodes refer to other nodes, +// while read nodes refer to the blocks. +message QueryPlan { + // Each node is encoded with 3 values: + // - node type: 0 - read, 1 - merge; + // - range offset; + // - range length. + repeated uint32 graph = 1; + + // The blocks matching the query. + repeated metastore.v1.BlockMeta blocks = 2; + // TODO: Use opaque byte array to avoid unnecessary + // proto encoding/decoding overhead in transient nodes. + // Graph nodes reference ranges, thus each range could + // be a blob of bytes: + // repeated bytes block_ranges = 2; +} + +message Query { + QueryType query_type = 1; + // Exactly one of the following fields should be set, + // depending on the query type. + LabelNamesQuery label_names = 2; + LabelValuesQuery label_values = 3; + SeriesLabelsQuery series_labels = 4; + TimeSeriesQuery time_series = 5; + TreeQuery tree = 6; + // pprof + // function_details + // call_graph + // top_table + // ... +} + +enum QueryType { + QUERY_UNSPECIFIED = 0; + QUERY_LABEL_NAMES = 1; + QUERY_LABEL_VALUES = 2; + QUERY_SERIES_LABELS = 3; + QUERY_TIME_SERIES = 4; + QUERY_TREE = 5; +} + +message InvokeResponse { + repeated Report reports = 1; + Diagnostics diagnostics = 2; +} + +// Diagnostic messages, events, statistics, analytics, etc. +message Diagnostics {} + +message Report { + ReportType report_type = 1; + // Exactly one of the following fields should be set, + // depending on the report type. 
+ LabelNamesReport label_names = 2; + LabelValuesReport label_values = 3; + SeriesLabelsReport series_labels = 4; + TimeSeriesReport time_series = 5; + TreeReport tree = 6; +} + +enum ReportType { + REPORT_UNSPECIFIED = 0; + REPORT_LABEL_NAMES = 1; + REPORT_LABEL_VALUES = 2; + REPORT_SERIES_LABELS = 3; + REPORT_TIME_SERIES = 4; + REPORT_TREE = 5; +} + +message LabelNamesQuery {} + +message LabelNamesReport { + LabelNamesQuery query = 1; + repeated string label_names = 2; +} + +message LabelValuesQuery { + string label_name = 1; +} + +message LabelValuesReport { + LabelValuesQuery query = 1; + repeated string label_values = 2; +} + +message SeriesLabelsQuery { + repeated string label_names = 1; +} + +message SeriesLabelsReport { + SeriesLabelsQuery query = 1; + repeated types.v1.Labels series_labels = 2; +} + +message TimeSeriesQuery { + double step = 1; + repeated string group_by = 2; + optional types.v1.TimeSeriesAggregationType aggregation = 3; +} + +message TimeSeriesReport { + TimeSeriesQuery query = 1; + repeated types.v1.Series time_series = 2; +} + +message TreeQuery { + int64 max_nodes = 1; +} + +message TreeReport { + TreeQuery query = 1; + bytes tree = 2; +} diff --git a/go.mod b/go.mod index 58552f8958..f09e864612 100644 --- a/go.mod +++ b/go.mod @@ -36,6 +36,9 @@ require ( github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 github.com/hashicorp/golang-lru/v2 v2.0.7 + github.com/hashicorp/raft v1.7.0 + github.com/hashicorp/raft-wal v0.4.1 + github.com/iancoleman/strcase v0.3.0 github.com/json-iterator/go v1.1.12 github.com/k0kubun/pp/v3 v3.2.0 github.com/klauspost/compress v1.17.9 @@ -66,6 +69,7 @@ require ( github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/valyala/bytebufferpool v1.0.0 github.com/xlab/treeprint v1.2.0 + go.etcd.io/bbolt v1.3.10 go.opentelemetry.io/proto/otlp v1.1.0 go.uber.org/atomic v1.11.0 go.uber.org/goleak v1.3.0 @@ -117,16 +121,21 @@ require ( github.com/aws/aws-sdk-go-v2/service/sts v1.19.2 // indirect github.com/aws/smithy-go v1.13.5 // indirect github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 // indirect + github.com/benbjohnson/immutable v0.4.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/chainguard-dev/git-urls v1.0.2 // indirect github.com/clbanning/mxj v1.8.4 // indirect + github.com/coreos/etcd v3.3.27+incompatible // indirect github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/coreos/pkg v0.0.0-20220810130054-c7d1c02cb6cf // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dolthub/maphash v0.1.0 // indirect github.com/edsrzf/mmap-go v1.1.0 // indirect github.com/efficientgo/core v1.0.0-rc.2 // indirect github.com/efficientgo/e2e v0.14.1-0.20230710114240-c316eb95ae5b // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect @@ -148,7 +157,9 @@ require ( github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/btree v1.1.2 // indirect + github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-querystring v1.1.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect github.com/google/s2a-go v0.1.7 // indirect 
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.3 // indirect @@ -156,9 +167,10 @@ require ( github.com/hashicorp/consul/api v1.28.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-hclog v1.5.0 // indirect + github.com/hashicorp/go-hclog v1.6.2 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-msgpack v1.1.5 // indirect + github.com/hashicorp/go-msgpack/v2 v2.1.1 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/go-sockaddr v1.0.6 // indirect @@ -166,6 +178,7 @@ require ( github.com/hashicorp/golang-lru v0.6.0 // indirect github.com/hashicorp/memberlist v0.5.0 // indirect github.com/hashicorp/serf v0.10.1 // indirect + github.com/imdario/mergo v0.3.16 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect @@ -200,6 +213,7 @@ require ( github.com/segmentio/encoding v0.3.6 // indirect github.com/sercand/kuberesolver/v5 v5.1.1 // indirect github.com/soheilhy/cmux v0.1.5 // indirect + github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/tencentyun/cos-go-sdk-v5 v0.7.40 // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect @@ -224,12 +238,17 @@ require ( google.golang.org/api v0.172.0 // indirect google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect + k8s.io/api v0.29.2 // indirect k8s.io/apimachinery v0.29.2 // indirect k8s.io/client-go v0.29.2 // indirect k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) replace ( diff --git a/go.sum b/go.sum index 8ed60b7dd1..d19e05c369 100644 --- a/go.sum +++ b/go.sum @@ -129,6 +129,8 @@ github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/immutable v0.4.0 h1:CTqXbEerYso8YzVPxmWxh2gnoRQbbB9X1quUC8+vGZA= +github.com/benbjohnson/immutable v0.4.0/go.mod h1:iAr8OjJGLnLmVUr9MZ/rz4PWUy6Ouc2JLYuMArmvAJM= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -159,10 +161,16 @@ github.com/colega/go-yaml-yaml v0.0.0-20220720105220-255a8d16d094 h1:FpZSn61BWXb github.com/colega/go-yaml-yaml v0.0.0-20220720105220-255a8d16d094/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= github.com/colega/zeropool v0.0.0-20230505084239-6fb4a4f75381 h1:d5EKgQfRQvO97jnISfR89AiCCCJMwMFoSxUiU0OGCRU= github.com/colega/zeropool 
v0.0.0-20230505084239-6fb4a4f75381/go.mod h1:OU76gHeRo8xrzGJU3F3I1CqX1ekM8dfJw0+wPeMwnp0= +github.com/coreos/etcd v3.3.27+incompatible h1:QIudLb9KeBsE5zyYxd1mjzRSkzLg9Wf9QlRwFgd6oTA= +github.com/coreos/etcd v3.3.27+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= +github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20220810130054-c7d1c02cb6cf h1:GOPo6vn/vTN+3IwZBvXX0y5doJfSC7My0cdzelyOCsQ= +github.com/coreos/pkg v0.0.0-20220810130054-c7d1c02cb6cf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -206,6 +214,8 @@ github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/Ir github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -330,6 +340,7 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github/v58 v58.0.1-0.20240111193443-e9f52699f5e5 h1:Cm3eMs9Qj7fqDQOascVTJg37N0T7Vb2foS//WopCpWw= @@ -405,14 +416,16 @@ github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= -github.com/hashicorp/go-hclog v1.5.0/go.mod 
h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I= +github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs= github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4= +github.com/hashicorp/go-msgpack/v2 v2.1.1 h1:xQEY9yB2wnHitoSzk/B9UjXWRQ67QKu5AOm8aFp8N3I= +github.com/hashicorp/go-msgpack/v2 v2.1.1/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= @@ -442,6 +455,10 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702 h1:fI1LXuBaS1d9z1kmb++Og6YD8uMRwadXorCwE+xgOFA= github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702/go.mod h1:z71gkJdrkAt/Rl6C7Q79VE7AwJ5lUF+M+fzFTyIHYB0= +github.com/hashicorp/raft v1.7.0 h1:4u24Qn6lQ6uwziM++UgsyiT64Q8GyRn43CV41qPiz1o= +github.com/hashicorp/raft v1.7.0/go.mod h1:N1sKh6Vn47mrWvEArQgILTyng8GoDRNYlgKyK7PMjs0= +github.com/hashicorp/raft-wal v0.4.1 h1:aU8XZ6x8R9BAIB/83Z1dTDtXvDVmv9YVYeXxd/1QBSA= +github.com/hashicorp/raft-wal v0.4.1/go.mod h1:A6vP5o8hGOs1LHfC1Okh9xPwWDcmb6Vvuz/QyqUXlOE= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= github.com/hetznercloud/hcloud-go/v2 v2.6.0 h1:RJOA2hHZ7rD1pScA4O1NF6qhkHyUdbbxjHgFNot8928= @@ -450,6 +467,8 @@ github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUq github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/huaweicloud/huaweicloud-sdk-go-obs v3.23.3+incompatible h1:tKTaPHNVwikS3I1rdyf1INNvgJXWSf/+TzqsiGbrgnQ= github.com/huaweicloud/huaweicloud-sdk-go-obs v3.23.3+incompatible/go.mod h1:l7VUhRbTKCzdOacdT4oWCwATKyvZqUOlOqr0Ous3k4s= +github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= @@ -714,6 +733,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark 
v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= +go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY= go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA= go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg= diff --git a/go.work.sum b/go.work.sum index fca2569ad2..10144c38ff 100644 --- a/go.work.sum +++ b/go.work.sum @@ -524,6 +524,8 @@ github.com/Bose/minisentinel v0.0.0-20200130220412-917c5a9223bb/go.mod h1:WsAABb github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc= github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= +github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= +github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/KimMachineGun/automemlimit v0.5.0 h1:BeOe+BbJc8L5chL3OwzVYjVzyvPALdd5wxVVOWuUZmQ= github.com/KimMachineGun/automemlimit v0.5.0/go.mod h1:di3GCKiu9Y+1fs92erCbUvKzPkNyViN3mA0vti/ykEQ= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw= @@ -542,6 +544,8 @@ github.com/OneOfOne/xxhash v1.2.6/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdII github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409 h1:DTQ/38ao/CfXsrK0cSAL+h4R/u0VVvfWLZEOlLwEROI= +github.com/Sereal/Sereal/Go/sereal v0.0.0-20231009093132-b9187f1a92c6 h1:5kUcJJAKWWI82Xnp/CaU0eu5hLlHkmm9acjowSkwCd0= +github.com/Sereal/Sereal/Go/sereal v0.0.0-20231009093132-b9187f1a92c6/go.mod h1:JwrycNnC8+sZPDyzM3MQ86LvaGzSpfxg885KOOwFRW4= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5 h1:rFw4nCn9iMW+Vajsk51NtYIcwSTkXr+JGrMd36kTDJw= @@ -576,11 +580,17 @@ github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1 h1:w/fPGB0t5rWwA43mux4e9o github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= +github.com/benmathews/bench v0.0.0-20210120214102-f7c75b9ef6e7 h1:nYTgFk9sOL3rmNew6rR2anUWWCzmSYPMJiSmowV8Yls= +github.com/benmathews/bench v0.0.0-20210120214102-f7c75b9ef6e7/go.mod h1:peX7BEhSFSvvnxdido50pUMhlFi24dVgtTU1oZkHTUU= +github.com/benmathews/hdrhistogram-writer v0.0.0-20210120211942-3cb1c7c33f95 h1:tAEzz8rP6JRzrARM5HecEuhY23qL2CSGRTFcNzwjOWI= +github.com/benmathews/hdrhistogram-writer v0.0.0-20210120211942-3cb1c7c33f95/go.mod h1:2MBckC8FahPaeLz58Qe6ZyVKm8UU1gHkINEv9Sw7pnI= github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y= 
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/bsm/redislock v0.9.1 h1:uTTZU82xg2PjI8X5T9PGcX/5k1FX3Id7bqkwy1As6c0= github.com/bsm/redislock v0.9.1/go.mod h1:ToFoB1xQbOJYG7e2ZBiPXotlhImqWgEa4+u/lLQ1nSc= github.com/casbin/casbin/v2 v2.37.0 h1:/poEwPSovi4bTOcP752/CsTQiRz2xycyVKFG7GUhbDw= @@ -634,6 +644,8 @@ github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cristalhq/hedgedhttp v0.9.1 h1:g68L9cf8uUyQKQJwciD0A1Vgbsz+QgCjuB1I8FAsCDs= github.com/cristalhq/hedgedhttp v0.9.1/go.mod h1:XkqWU6qVMutbhW68NnzjWrGtH8NUx1UfYqGYtHVKIsI= +github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892 h1:qg9VbHo1TlL0KDM0vYvBG9EY0X0Yku5WYIPoFWt8f6o= +github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVzE5DEzZhPfvhY/9sPFMQIxaJ9VAMs9AagrE= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/digitalocean/godo v1.104.1/go.mod h1:VAI/L5YDzMuPRU01lEEUSQ/sp5Z//1HnnFv/RBTEdbg= @@ -690,6 +702,8 @@ github.com/go-openapi/swag v0.22.5/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmrid github.com/go-openapi/swag v0.22.6/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0= github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= @@ -749,7 +763,6 @@ github.com/google/flatbuffers v2.0.8+incompatible h1:ivUb1cGomAB101ZM1T0nOiWz9pS github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-pkcs11 v0.2.1-0.20230907215043-c6f79328ddf9 h1:OF1IPgv+F4NmqmJ98KTjdN97Vs1JxDPB3vbmYzV2dpk= github.com/google/go-pkcs11 v0.2.1-0.20230907215043-c6f79328ddf9/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -795,7 +808,6 @@ github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.12.0/go.mod 
h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= @@ -804,6 +816,10 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/mdns v1.0.4 h1:sY0CMhFmjIPDMlTB+HfymFHCaYLhgifZ0QhjaYKD/UQ= github.com/hashicorp/nomad/api v0.0.0-20230721134942-515895c7690c/go.mod h1:O23qLAZuCx4htdY9zBaO4cJPXgleSFEdq6D/sezGgYE= +github.com/hashicorp/raft-boltdb v0.0.0-20220329195025-15018e9b97e0 h1:CO8dBMLH6dvE1jTn/30ZZw3iuPsNfajshWoJTnVc5cc= +github.com/hashicorp/raft-boltdb v0.0.0-20220329195025-15018e9b97e0/go.mod h1:nTakvJ4XYq45UXtn0DbwR4aU9ZdjlnIenpbs6Cd+FM0= +github.com/hashicorp/raft-boltdb/v2 v2.2.2 h1:rlkPtOllgIcKLxVT4nutqlTH2NRFn+tO1wwZk/4Dxqw= +github.com/hashicorp/raft-boltdb/v2 v2.2.2/go.mod h1:N8YgaZgNJLpZC+h+by7vDu5rzsRgONThTEeUS3zWbfY= github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hetznercloud/hcloud-go/v2 v2.4.0/go.mod h1:l7fA5xsncFBzQTyw29/dw5Yr88yEGKKdc6BHf24ONS0= @@ -814,8 +830,6 @@ github.com/hudl/fargo v1.4.0 h1:ZDDILMbB37UlAVLlWcJ2Iz1XuahZZTDZfdCKeclfq2s= github.com/hudl/fargo v1.4.0/go.mod h1:9Ai6uvFy5fQNq6VPKtg+Ceq1+eTY4nKUlR2JElEOcDo= github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= -github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= -github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= @@ -837,6 +851,7 @@ github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4= github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= github.com/klauspost/compress v1.17.1/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/knz/go-libedit v1.10.1 h1:0pHpWtx9vcvC0xGZqEQlQdfSQs7WRlAjuPvk3fOZDCo= github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= @@ -951,6 +966,8 @@ github.com/performancecopilot/speed/v4 v4.0.0 h1:VxEDCmdkfbQYDlcr/GC9YoN9PQ6p8ul github.com/performancecopilot/speed/v4 v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod 
h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= +github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -959,6 +976,8 @@ github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qR github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= +github.com/pquerna/ffjson v0.0.0-20190930134022-aa0246cd15f7 h1:xoIK0ctDddBMnc74udxJYBqlo9Ylnsp1waqjLsnef20= +github.com/pquerna/ffjson v0.0.0-20190930134022-aa0246cd15f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= @@ -996,6 +1015,8 @@ github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b h1:gQZ0qzfKHQIybL github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.21/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/segmentio/asm v1.1.3 h1:WM03sfUOENvvKexOLp+pCqgb/WDjsi7EK8gIsICtzhc= +github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtrmhM= +github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= github.com/shoenig/test v0.6.6 h1:Oe8TPH9wAbv++YPNDKJWUnI8Q4PPWCx3UbOfH+FxiMU= github.com/shoenig/test v0.6.6/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= @@ -1029,7 +1050,6 @@ github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5J github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= @@ -1038,6 +1058,8 @@ github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.194 h1:YB6qJyC github.com/thanos-io/objstore v0.0.0-20230727115635-d0c43443ecda h1:DtxaU/a7QRPiUhwtPrZFlS81y+9Mgny4KoLq65cu04U= github.com/thanos-io/objstore v0.0.0-20230727115635-d0c43443ecda/go.mod h1:IS7Z25+0KaknyU2P5PTP/5hwY6Yr/FzbInF88Yd5auU= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0= +github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw= github.com/tv42/httpunix 
v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8= github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= @@ -1074,6 +1096,8 @@ go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsX go.etcd.io/etcd/client/v2 v2.305.4 h1:Dcx3/MYyfKcPNLpR4VVQUP5KgYrBeJtktBwEKkw08Ao= go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+DHwTGEbU= go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= +go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= +go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= @@ -1219,6 +1243,8 @@ golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gonum.org/v1/gonum v0.8.2 h1:CCXrcPKiGGotvnN6jfUsKk4rRqm7q09/YbKb5xCEvtM= +gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o= +gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b h1:Qh4dB5D/WpoUUp3lSod7qgoyEHbDGPUWjIbnqdqqe1k= google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= @@ -1367,6 +1393,8 @@ google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHh gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw= +gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= @@ -1377,6 +1405,8 @@ gopkg.in/telebot.v3 v3.2.1 h1:3I4LohaAyJBiivGmkfB+CiVu7QFOWkuZ4+KHgO/G3rs= gopkg.in/telebot.v3 v3.2.1/go.mod h1:GJKwwWqp9nSkIVN51eRKU78aB5f5OnQuWdwiIZfPbko= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/vmihailenco/msgpack.v2 v2.9.2 h1:gjPqo9orRVlSAH/065qw3MsFCDpH7fa1KpiizXyllY4= +gopkg.in/vmihailenco/msgpack.v2 v2.9.2/go.mod h1:/3Dn1Npt9+MYyLpYYXjInO/5jvMLamn+AEGwNEOatn8= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1391,11 +1421,14 @@ k8s.io/gengo 
v0.0.0-20201113003025-83324d819ded h1:JApXBKYyB7l9xx+DK7/+mFjC7A9Bt k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c h1:GohjlNKauSai7gN4wsJkeZ3WAJx4Sh+oT/b5IYn5suA= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +nullprogram.com/x/optparse v1.0.0 h1:xGFgVi5ZaWOnYdac2foDT3vg0ZZC9ErXFV57mr4OHrI= rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4= rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY= diff --git a/pkg/experiment/compactor/compaction_worker.go b/pkg/experiment/compactor/compaction_worker.go new file mode 100644 index 0000000000..5a8964aaf2 --- /dev/null +++ b/pkg/experiment/compactor/compaction_worker.go @@ -0,0 +1,297 @@ +package compactor + +import ( + "context" + "flag" + "fmt" + "os" + "path/filepath" + "runtime/debug" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/grafana/dskit/services" + "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + + compactorv1 "github.com/grafana/pyroscope/api/gen/proto/go/compactor/v1" + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + "github.com/grafana/pyroscope/pkg/experiment/metastore/client" + "github.com/grafana/pyroscope/pkg/experiment/querybackend/block" + "github.com/grafana/pyroscope/pkg/objstore" +) + +type Worker struct { + *services.BasicService + + config Config + logger log.Logger + metastoreClient *metastoreclient.Client + storage objstore.Bucket + metrics *compactionWorkerMetrics + + jobMutex sync.RWMutex + pendingJobs map[string]*compactorv1.CompactionJob + activeJobs map[string]*compactorv1.CompactionJob + completedJobs map[string]*compactorv1.CompactionJobStatus + + queue chan *compactorv1.CompactionJob +} + +type Config struct { + JobCapacity int `yaml:"job_capacity"` + SmallObjectSize int `yaml:"small_object_size_bytes"` + TempDir string `yaml:"temp_dir"` +} + +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + const prefix = "compaction-worker." 
+ tempdir := filepath.Join(os.TempDir(), "pyroscope-compactor") + f.IntVar(&cfg.JobCapacity, prefix+"job-capacity", 3, "how many concurrent jobs will a worker run at most") + f.IntVar(&cfg.SmallObjectSize, prefix+"small-object-size-bytes", 8<<20, "size of the object that can be loaded in memory") + f.StringVar(&cfg.TempDir, prefix+"temp-dir", tempdir, "temporary directory for compaction jobs") +} + +func New(config Config, logger log.Logger, metastoreClient *metastoreclient.Client, storage objstore.Bucket, reg prometheus.Registerer) (*Worker, error) { + w := &Worker{ + config: config, + logger: logger, + metastoreClient: metastoreClient, + storage: storage, + pendingJobs: make(map[string]*compactorv1.CompactionJob), + activeJobs: make(map[string]*compactorv1.CompactionJob), + completedJobs: make(map[string]*compactorv1.CompactionJobStatus), + metrics: newMetrics(reg), + queue: make(chan *compactorv1.CompactionJob, 2*config.JobCapacity), + } + w.BasicService = services.NewBasicService(w.starting, w.running, w.stopping) + return w, nil +} + +func (w *Worker) starting(ctx context.Context) (err error) { + return nil +} + +func (w *Worker) running(ctx context.Context) error { + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + go func() { + for { + select { + case <-ctx.Done(): + return + + case job := <-w.queue: + w.jobMutex.Lock() + delete(w.pendingJobs, job.Name) + w.activeJobs[job.Name] = job + w.jobMutex.Unlock() + + _ = level.Info(w.logger).Log("msg", "starting compaction job", "job", job.Name) + status := w.startJob(ctx, job) + _ = level.Info(w.logger).Log("msg", "compaction job finished", "job", job.Name) + + w.jobMutex.Lock() + delete(w.activeJobs, job.Name) + w.completedJobs[job.Name] = status + w.jobMutex.Unlock() + } + } + }() + + for { + select { + case <-ticker.C: + w.poll(ctx) + + case <-ctx.Done(): + return nil + } + } +} + +func (w *Worker) poll(ctx context.Context) { + w.jobMutex.Lock() + level.Debug(w.logger).Log( + "msg", "polling for compaction jobs and status updates", + "active_jobs", len(w.activeJobs), + "pending_jobs", len(w.pendingJobs), + "pending_updates", len(w.completedJobs)) + + pendingStatusUpdates := make([]*compactorv1.CompactionJobStatus, 0, len(w.completedJobs)) + for _, update := range w.completedJobs { + level.Debug(w.logger).Log("msg", "completed job update", "job", update.JobName, "status", update.Status) + pendingStatusUpdates = append(pendingStatusUpdates, update) + } + for _, activeJob := range w.activeJobs { + level.Debug(w.logger).Log("msg", "in progress job update", "job", activeJob.Name) + update := activeJob.Status.CloneVT() + update.Status = compactorv1.CompactionStatus_COMPACTION_STATUS_IN_PROGRESS + pendingStatusUpdates = append(pendingStatusUpdates, update) + } + for _, pendingJob := range w.pendingJobs { + level.Debug(w.logger).Log("msg", "pending job update", "job", pendingJob.Name) + update := pendingJob.Status.CloneVT() + update.Status = compactorv1.CompactionStatus_COMPACTION_STATUS_IN_PROGRESS + pendingStatusUpdates = append(pendingStatusUpdates, update) + } + + jobCapacity := w.config.JobCapacity - len(w.activeJobs) - len(w.pendingJobs) + if jobCapacity < 0 { + jobCapacity = 0 + } + w.jobMutex.Unlock() + + if len(pendingStatusUpdates) > 0 || jobCapacity > 0 { + jobsResponse, err := w.metastoreClient.PollCompactionJobs(ctx, &compactorv1.PollCompactionJobsRequest{ + JobStatusUpdates: pendingStatusUpdates, + JobCapacity: uint32(jobCapacity), + }) + + if err != nil { + level.Error(w.logger).Log("msg", "failed to poll 
compaction jobs", "err", err) + return + } + + level.Debug(w.logger).Log("msg", "poll response received", "compaction_jobs", len(jobsResponse.CompactionJobs)) + + pendingJobs := make([]*compactorv1.CompactionJob, 0, len(jobsResponse.CompactionJobs)) + for _, job := range jobsResponse.CompactionJobs { + pendingJobs = append(pendingJobs, job.CloneVT()) + } + + w.jobMutex.Lock() + for _, update := range pendingStatusUpdates { + delete(w.completedJobs, update.JobName) + } + for _, job := range pendingJobs { + w.pendingJobs[job.Name] = job + } + w.jobMutex.Unlock() + + for _, job := range pendingJobs { + select { + case w.queue <- job: + default: + level.Warn(w.logger).Log("msg", "dropping job", "job_name", job.Name) + w.jobMutex.Lock() + delete(w.pendingJobs, job.Name) + w.jobMutex.Unlock() + } + } + } +} + +func (w *Worker) stopping(err error) error { + // TODO aleks: handle shutdown + return nil +} + +func (w *Worker) startJob(ctx context.Context, job *compactorv1.CompactionJob) *compactorv1.CompactionJobStatus { + jobStartTime := time.Now() + labels := []string{job.TenantId, fmt.Sprint(job.Shard), fmt.Sprint(job.CompactionLevel)} + statusName := "unknown" + defer func() { + elapsed := time.Since(jobStartTime) + jobStatusLabel := append(labels, statusName) + w.metrics.jobDuration.WithLabelValues(jobStatusLabel...).Observe(elapsed.Seconds()) + w.metrics.jobsCompleted.WithLabelValues(jobStatusLabel...).Inc() + w.metrics.jobsInProgress.WithLabelValues(labels...).Dec() + }() + w.metrics.jobsInProgress.WithLabelValues(labels...).Inc() + + sp, ctx := opentracing.StartSpanFromContext(ctx, "StartCompactionJob", + opentracing.Tag{Key: "Job", Value: job.String()}, + opentracing.Tag{Key: "Tenant", Value: job.TenantId}, + opentracing.Tag{Key: "Shard", Value: job.Shard}, + opentracing.Tag{Key: "CompactionLevel", Value: job.CompactionLevel}, + opentracing.Tag{Key: "BlockCount", Value: len(job.Blocks)}, + ) + defer sp.Finish() + + _ = level.Info(w.logger).Log( + "msg", "compacting blocks for job", + "job", job.Name, + "blocks", len(job.Blocks)) + + tempdir := filepath.Join(w.config.TempDir, job.Name) + sourcedir := filepath.Join(tempdir, "source") + // TODO(kolesnikovae): Return the actual error once we + // can handle compaction failures in metastore. 
+	compacted, err := pretendEverythingIsOK(func() ([]*metastorev1.BlockMeta, error) {
+		return block.Compact(ctx, job.Blocks, w.storage,
+			block.WithCompactionTempDir(tempdir),
+			block.WithCompactionObjectOptions(
+				block.WithObjectMaxSizeLoadInMemory(w.config.SmallObjectSize),
+				block.WithObjectDownload(sourcedir),
+			),
+		)
+	})
+
+	logger := log.With(w.logger,
+		"job_name", job.Name,
+		"job_shard", job.Shard,
+		"job_tenant", job.TenantId,
+		"job_compaction_level", job.CompactionLevel,
+	)
+
+	switch {
+	case err == nil:
+		_ = level.Info(logger).Log(
+			"msg", "successful compaction for job",
+			"input_blocks", len(job.Blocks),
+			"output_blocks", len(compacted))
+
+		for _, c := range compacted {
+			_ = level.Info(logger).Log(
+				"msg", "new compacted block",
+				"block_id", c.Id,
+				"block_tenant", c.TenantId,
+				"block_shard", c.Shard,
+				"block_size", c.Size,
+				"block_compaction_level", c.CompactionLevel,
+				"block_min_time", c.MinTime,
+				"block_max_time", c.MaxTime,
+				"tenant_services", len(c.TenantServices))
+		}
+
+		job.Status.Status = compactorv1.CompactionStatus_COMPACTION_STATUS_SUCCESS
+		job.Status.CompletedJob = &compactorv1.CompletedJob{Blocks: compacted}
+		statusName = "success"
+
+	case errors.Is(err, context.Canceled):
+		_ = level.Warn(logger).Log("msg", "job cancelled", "job", job.Name)
+		job.Status.Status = compactorv1.CompactionStatus_COMPACTION_STATUS_UNSPECIFIED
+		statusName = "cancelled"
+
+	default:
+		_ = level.Error(logger).Log("msg", "failed to compact blocks", "err", err, "job", job.Name)
+		job.Status.Status = compactorv1.CompactionStatus_COMPACTION_STATUS_FAILURE
+		statusName = "failure"
+	}
+
+	return job.Status
+}
+
+func pretendEverythingIsOK(fn func() ([]*metastorev1.BlockMeta, error)) (m []*metastorev1.BlockMeta, err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			fmt.Println("ignoring compaction panic:", r)
+			fmt.Println(string(debug.Stack()))
+			m = nil
+		}
+		if err != nil {
+			if errors.Is(err, context.Canceled) {
+				// We can handle this.
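+				// Propagate the cancellation error so the caller reports the
+				// job as cancelled rather than successful.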
+ return + } + fmt.Println("ignoring compaction error:", err) + m = nil + } + err = nil + }() + return fn() +} diff --git a/pkg/experiment/compactor/compaction_worker_metrics.go b/pkg/experiment/compactor/compaction_worker_metrics.go new file mode 100644 index 0000000000..122bfc3401 --- /dev/null +++ b/pkg/experiment/compactor/compaction_worker_metrics.go @@ -0,0 +1,36 @@ +package compactor + +import "github.com/prometheus/client_golang/prometheus" + +type compactionWorkerMetrics struct { + jobsCompleted *prometheus.CounterVec + jobsInProgress *prometheus.GaugeVec + jobDuration *prometheus.HistogramVec +} + +func newMetrics(r prometheus.Registerer) *compactionWorkerMetrics { + m := &compactionWorkerMetrics{} + + m.jobsCompleted = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "pyroscope_compaction_jobs_completed_count", + Help: "Total number of compactions that were executed.", + }, []string{"tenant", "shard", "level", "outcome"}) + m.jobsInProgress = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "pyroscope_compaction_jobs_current", + Help: "The number of active compaction jobs per level", + }, []string{"tenant", "shard", "level"}) + m.jobDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Name: "pyroscope_compaction_jobs_duration_seconds", + Help: "Duration of compaction job runs", + Buckets: prometheus.ExponentialBuckets(1, 2, 14), + }, []string{"tenant", "shard", "level", "outcome"}) + + if r != nil { + r.MustRegister( + m.jobsCompleted, + m.jobsInProgress, + m.jobDuration, + ) + } + return m +} diff --git a/pkg/experiment/distributor/distributor_series.go b/pkg/experiment/distributor/distributor_series.go new file mode 100644 index 0000000000..935cdcc014 --- /dev/null +++ b/pkg/experiment/distributor/distributor_series.go @@ -0,0 +1,176 @@ +package distributor + +import ( + "fmt" + "hash/fnv" + + "github.com/grafana/dskit/ring" + + v1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" + distributormodel "github.com/grafana/pyroscope/pkg/distributor/model" + phlaremodel "github.com/grafana/pyroscope/pkg/model" +) + +// FIXME(kolesnikovae): +// 1. Essentially, we do not need dskit ring. Instead, it would be better to access +// the memberlist/serf directly and build the distribution from there (generating +// tokens as we want). Or, alternatively, we could implement BasicLifecyclerDelegate +// interface. +// 2. Ensure we have access to all ingester instances, regardless of their state. +// The ring exposes only healthy instances, which is not what we want, and this +// will lead to vast shard relocations and will deteriorate data locality if +// instances leave and join the ring frequently. +// Currently, the heartbeat timeout is set to 1m by default, which should prevent +// us from severe problems, but it's still a problem. +// 3. Health checks are useless. It's better to sorry than to ask for permission: +// client should mark failed/slow instances and not rely on the ring to do so. +// 4. Implement stream statistics (see seriesPlacement interface). This could be done +// using Count-Min Sketch, or Count-Min-Log Sketch, or HyperLogLog(+(+)). +// 5. Push API should be streaming: use of batching is not efficient. 
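+
+// One possible building block for item 4 above (stream statistics): a tiny
+// count-min sketch tracking per-series traffic, which a usage-aware
+// seriesPlacement could consult when sizing a tenant service's shard scope.
+// This is an illustrative sketch only; the type, its dimensions, and the
+// mixing constants are placeholders, not something the current code relies on.
+type countMinSketch struct {
+	rows [4][4096]uint32 // depth 4, width 4096: ~64 KiB of counters
+}
+
+// Fixed odd constants so each row mixes the key differently.
+var rowMixers = [4]uint64{0x9E3779B97F4A7C15, 0xC2B2AE3D27D4EB4F, 0x165667B19E3779F9, 0x27D4EB2F165667C5}
+
+func (c *countMinSketch) add(key uint64) {
+	for i := range c.rows {
+		h := (key ^ rowMixers[i]) * rowMixers[i]
+		c.rows[i][(h>>32)%uint64(len(c.rows[i]))]++
+	}
+}
+
+// estimate returns the minimum count across rows; it may overcount due to
+// collisions but never undercounts.
+func (c *countMinSketch) estimate(key uint64) uint32 {
+	est := ^uint32(0)
+	for i := range c.rows {
+		h := (key ^ rowMixers[i]) * rowMixers[i]
+		if v := c.rows[i][(h>>32)%uint64(len(c.rows[i]))]; v < est {
+			est = v
+		}
+	}
+	return est
+}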
+ +type seriesDistributor struct { + tenantServices map[tenantServiceKey]*tenantServicePlacement + seriesPlacement seriesPlacement + distribution *distribution +} + +type seriesPlacement interface { + tenantServiceSize(tenantServiceKey, []shard) int + tenantServiceSeriesShard(*distributormodel.ProfileSeries, []shard) int +} + +type defaultSeriesPlacement struct{} + +func (defaultSeriesPlacement) tenantServiceSize(k tenantServiceKey, shards []shard) int { return 2 } + +func (defaultSeriesPlacement) tenantServiceSeriesShard(s *distributormodel.ProfileSeries, shards []shard) int { + k := fnv64(phlaremodel.LabelPairsString(s.Labels)) + return int(k % uint64(len(shards))) +} + +func newSeriesDistributor(r ring.ReadRing) (d *seriesDistributor, err error) { + d = &seriesDistributor{seriesPlacement: defaultSeriesPlacement{}} + if d.distribution, err = getDistribution(r, maxDistributionAge); err != nil { + return nil, fmt.Errorf("series distributor: %w", err) + } + d.tenantServices = make(map[tenantServiceKey]*tenantServicePlacement) + return d, nil +} + +func (d *seriesDistributor) buildRequests(tenantID string, series []*distributormodel.ProfileSeries) []*ingestionRequest { + var size int + for _, s := range series { + d.append(tenantID, s) + size++ + } + trackers := make([]*ingestionRequest, 0, size) + for _, p := range d.tenantServices { + for _, s := range p.series { + t := &ingestionRequest{profile: s} + t.shard, t.instances = p.pickShard(s) + trackers = append(trackers, t) + } + } + // Do not retain the series, but do keep shards: + // profileTracker references ring instances. + d.tenantServices = nil + return trackers +} + +func (d *seriesDistributor) append(tenant string, s *distributormodel.ProfileSeries) { + k := newTenantServiceKey(tenant, s.Labels) + p, ok := d.tenantServices[k] + if !ok { + p = d.newTenantServicePlacement(k) + d.tenantServices[k] = p + } + p.series = append(p.series, s) +} + +// Although a request may contain multiple series +// that belong to different services, the tenant is +// always the same (as of now). +type tenantServiceKey struct { + tenant string + service string +} + +func newTenantServiceKey(tenant string, seriesLabels []*v1.LabelPair) tenantServiceKey { + service := phlaremodel.Labels(seriesLabels).Get(phlaremodel.LabelNameServiceName) + return tenantServiceKey{ + tenant: tenant, + service: service, + } +} + +func (k tenantServiceKey) hash() uint64 { return fnv64(k.tenant, k.service) } + +const minShardsPerTenantService = 3 + +func (d *seriesDistributor) newTenantServicePlacement(key tenantServiceKey) *tenantServicePlacement { + size := d.seriesPlacement.tenantServiceSize(key, d.distribution.shards) + if size <= 0 { + size = len(d.distribution.shards) + } + return &tenantServicePlacement{ + seriesDistributor: d, + tenantServiceKey: key, + series: make([]*distributormodel.ProfileSeries, 0, 16), + // scope is a slice of shards that belong to the service. + // It might be larger than the actual number of shards allowed for use. + // In case of a delivery failure, at least minShardsPerTenantService + // options of the shard placement (instances) are available: the series + // will be sent to another shard location (ingester), but will still be + // associated with the shard. 
+ scope: d.distribution.serviceShards(max(size, minShardsPerTenantService), key.hash()), + size: size, + } +} + +type tenantServicePlacement struct { + *seriesDistributor + tenantServiceKey + series []*distributormodel.ProfileSeries + scope []shard + size int +} + +// Pick the exact shard for the key from N options +// and find instances where the shard may be placed. +func (p *tenantServicePlacement) pickShard(s *distributormodel.ProfileSeries) (uint32, []*ring.InstanceDesc) { + // Limit the scope for selection to the actual number + // of shards, allowed for the tenant service. + i := p.seriesPlacement.tenantServiceSeriesShard(s, p.scope[:p.size]) + x := p.scope[i] + instances := make([]*ring.InstanceDesc, len(p.scope)) + for j, o := range p.scope { + instances[j] = &p.distribution.desc[o.instance] + } + instances[0], instances[i] = instances[i], instances[0] + return x.id, instances +} + +type ingestionRequest struct { + profile *distributormodel.ProfileSeries + // Note that the instances reference shared objects, and must not be modified. + instances []*ring.InstanceDesc + shard uint32 +} + +func (p *ingestionRequest) next() (instance *ring.InstanceDesc, ok bool) { + for len(p.instances) > 0 { + instance, p.instances = p.instances[0], p.instances[1:] + if instance.State == ring.ACTIVE { + return instance, true + } + } + return nil, false +} + +func fnv64(keys ...string) uint64 { + h := fnv.New64a() + for _, k := range keys { + _, _ = h.Write([]byte(k)) + } + return h.Sum64() +} diff --git a/pkg/experiment/distributor/distributor_sharding.go b/pkg/experiment/distributor/distributor_sharding.go new file mode 100644 index 0000000000..cdb54232c3 --- /dev/null +++ b/pkg/experiment/distributor/distributor_sharding.go @@ -0,0 +1,129 @@ +package distributor + +import ( + "fmt" + "math/rand" + "slices" + "strings" + "sync" + "time" + + "github.com/grafana/dskit/ring" +) + +var ( + distributionCache sync.RWMutex + cachedDistribution *distribution +) + +const maxDistributionAge = time.Second * 5 + +func getDistribution(r ring.ReadRing, maxAge time.Duration) (*distribution, error) { + distributionCache.RLock() + d := cachedDistribution + if d != nil && !d.isExpired(maxAge) { + distributionCache.RUnlock() + return d, nil + } + distributionCache.RUnlock() + distributionCache.Lock() + defer distributionCache.Unlock() + if d != nil && !d.isExpired(maxAge) { + return d, nil + } + var shards = 64 + var instances = 128 + if d != nil { + shards = len(d.shards) + instances = len(d.desc) + } + n := newDistribution(shards, instances) + if err := n.readRing(r); err != nil { + return nil, fmt.Errorf("failed to read ring: %w", err) + } + cachedDistribution = n + return n, nil +} + +type distribution struct { + timestamp time.Time + shards []shard + desc []ring.InstanceDesc +} + +type shard struct { + id uint32 // 0 shard ID is used as a sentinel (zero value is invalid). + instance uint32 // references the instance in shards.desc. +} + +func newDistribution(shards, instances int) *distribution { + return &distribution{ + shards: make([]shard, 0, shards), + desc: make([]ring.InstanceDesc, 0, instances), + timestamp: time.Now(), + } +} + +func (d *distribution) isExpired(maxAge time.Duration) bool { + return time.Now().Add(-maxAge).After(d.timestamp) +} + +func (d *distribution) readRing(r ring.ReadRing) error { + all, err := r.GetAllHealthy(ring.Write) + if err != nil { + return err + } + if len(all.Instances) == 0 { + return ring.ErrEmptyRing + } + d.desc = all.Instances + // Jump hashing needs order. 
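+	// Sorting by instance ID keeps the shard-to-instance layout identical on
+	// every distributor that reads the same ring, so a given tenant-service
+	// key always resolves to the same shards.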
+ slices.SortFunc(d.desc, func(a, b ring.InstanceDesc) int { + return strings.Compare(a.Id, b.Id) + }) + i := uint32(0) + for j := range all.Instances { + for range all.Instances[j].Tokens { + i++ + d.shards = append(d.shards, shard{ + id: i, + instance: uint32(j), + }) + } + } + return nil +} + +// The constant determines which keys are generated for the +// jump hashing function. A generated value is added to the +// tenant service key hash to produce the next jump hashing +// key. The seed is fixed to ensure deterministic behaviour +// across instances. The value is a random generated with a +// crypto/rand.Read, and decoded as a little-endian uint64. +const serviceShardsRandSeed = 4349676827832284783 + +func (d *distribution) serviceShards(n int, service uint64) []shard { + // TODO(kolesnikovae): Precompute the jump hash keys (e.g., 1K should be enough). + rnd := rand.New(rand.NewSource(serviceShardsRandSeed)) + m := len(d.shards) + if m < n { + n = m + } + s := make([]shard, 0, n) + for i := 0; i < n; i++ { + j := jump(service&^rnd.Uint64(), m) + s = append(s, d.shards[j]) + } + return s +} + +// https://arxiv.org/pdf/1406.2294 +func jump(key uint64, buckets int) int { + var b, j = -1, 0 + for j < buckets { + b = j + key = key*2862933555777941757 + 1 + j = int(float64(b+1) * (float64(int64(1)<<31) / float64((key>>33)+1))) + } + return b +} diff --git a/pkg/experiment/distributor/singlereplica/singlereplica.go b/pkg/experiment/distributor/singlereplica/singlereplica.go new file mode 100644 index 0000000000..4ea150a30d --- /dev/null +++ b/pkg/experiment/distributor/singlereplica/singlereplica.go @@ -0,0 +1,25 @@ +package singlereplica + +import ( + "time" + + "github.com/grafana/dskit/ring" +) + +// The replication strategy that returns all the instances, regardless +// of their health and placement to allow the caller to decide which +// instances to use on its own. + +type replicationStrategy struct{} + +func (replicationStrategy) Filter( + instances []ring.InstanceDesc, + _ ring.Operation, + _ int, + _ time.Duration, + _ bool, +) ([]ring.InstanceDesc, int, error) { + return instances, 0, nil +} + +func NewReplicationStrategy() ring.ReplicationStrategy { return replicationStrategy{} } diff --git a/pkg/experiment/ingester/loki/index/buf.go b/pkg/experiment/ingester/loki/index/buf.go new file mode 100644 index 0000000000..f9d8f23391 --- /dev/null +++ b/pkg/experiment/ingester/loki/index/buf.go @@ -0,0 +1,114 @@ +package index + +import ( + "bytes" + "fmt" + "io" + "sync" + + "github.com/pkg/errors" +) + +type BufferWriter struct { + buf *bytes.Buffer + pos uint64 +} + +var pool = sync.Pool{ + New: func() interface{} { + return NewBufferWriter() + }, +} + +func GetBufferWriterFromPool() *BufferWriter { + res := pool.Get().(*BufferWriter) + res.Reset() + return res +} + +func PutBufferWriterToPool(fw *BufferWriter) { + fw.Reset() + pool.Put(fw) +} + +// NewBufferWriter returns a new BufferWriter. 
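+// The underlying buffer starts with an 8 KiB capacity and grows as needed;
+// pos tracks the logical write offset used by WriteAt and AddPadding.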
+// todo: pooling memory +func NewBufferWriter() *BufferWriter { + return &BufferWriter{ + buf: bytes.NewBuffer(make([]byte, 0, 0x2000)), + pos: 0, + } +} + +func (fw *BufferWriter) Pos() uint64 { + return fw.pos +} + +func (fw *BufferWriter) Write(bufs ...[]byte) error { + for _, buf := range bufs { + n, err := fw.buf.Write(buf) + if err != nil { + return err + } + fw.pos += uint64(n) + } + return nil +} + +func (fw *BufferWriter) Flush() error { + return nil +} + +func (fw *BufferWriter) WriteAt(buf []byte, pos uint64) error { + if pos > fw.pos { + return fmt.Errorf("position out of range") + } + if pos+uint64(len(buf)) > fw.pos { + return fmt.Errorf("write exceeds buffer size") + } + copy(fw.buf.Bytes()[pos:], buf) + return nil +} + +func (fw *BufferWriter) Read(buf []byte) (int, error) { + return fw.buf.Read(buf) +} + +func (fw *BufferWriter) ReadFrom(r io.Reader) (int64, error) { + n, err := fw.buf.ReadFrom(r) + if err != nil { + return n, err + } + fw.pos += uint64(n) + return n, err +} + +func (fw *BufferWriter) AddPadding(size int) error { + p := fw.pos % uint64(size) + if p == 0 { + return nil + } + p = uint64(size) - p + + if err := fw.Write(make([]byte, p)); err != nil { + return errors.Wrap(err, "add padding") + } + return nil +} + +func (fw *BufferWriter) Buffer() ([]byte, io.Closer, error) { + return fw.buf.Bytes(), io.NopCloser(nil), nil +} + +func (fw *BufferWriter) Close() error { + return nil +} + +func (fw *BufferWriter) Reset() { + fw.pos = 0 + fw.buf.Reset() +} + +func (fw *BufferWriter) Remove() error { + return nil +} diff --git a/pkg/experiment/ingester/loki/index/cmp.go b/pkg/experiment/ingester/loki/index/cmp.go new file mode 100644 index 0000000000..db87443a9d --- /dev/null +++ b/pkg/experiment/ingester/loki/index/cmp.go @@ -0,0 +1,76 @@ +package index + +import ( + "bytes" + "context" + "fmt" + "os" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/storage" + + phlaremodel "github.com/grafana/pyroscope/pkg/model" + "github.com/grafana/pyroscope/pkg/phlaredb/tsdb/index" +) + +type IIndexWriter interface { + AddSymbol(symbol string) error + AddSeries(ref storage.SeriesRef, lbs phlaremodel.Labels, fp model.Fingerprint, chunks ...index.ChunkMeta) error + Close() error +} + +func NewCompareIndexWriter(ctx context.Context, filepath string, fileWriter IIndexWriter) (*CompareIndexWriter, error) { + mem, err := NewWriter(ctx, SegmentsIndexWriterBufSize) + if err != nil { + return nil, fmt.Errorf("error creating memory index writer: %w", err) + } + return &CompareIndexWriter{ + filepath: filepath, + file: fileWriter, + mem: mem, + }, nil +} + +// This is a quick hack to test correctness of the new writer. +type CompareIndexWriter struct { + filepath string + file IIndexWriter + mem *Writer +} + +func (c *CompareIndexWriter) AddSymbol(symbol string) error { + ferr := c.file.AddSymbol(symbol) + merr := c.mem.AddSymbol(symbol) + if ferr != nil || merr != nil { + return fmt.Errorf("[CompareIndexWriter] error adding symbol: %v %v", ferr, merr) + } + return nil +} + +func (c *CompareIndexWriter) AddSeries(ref storage.SeriesRef, lbs phlaremodel.Labels, fp model.Fingerprint, chunks ...index.ChunkMeta) error { + ferr := c.file.AddSeries(ref, lbs, fp, chunks...) + merr := c.mem.AddSeries(ref, lbs, fp, chunks...) 
+ if ferr != nil || merr != nil { + return fmt.Errorf("[CompareIndexWriter] error adding series: %v %v", ferr, merr) + } + return nil +} + +func (c *CompareIndexWriter) Close() error { + ferr := c.file.Close() + merr := c.mem.Close() + if ferr != nil || merr != nil { + return fmt.Errorf("[CompareIndexWriter] error closing index writer: %v %v", ferr, merr) + } + fileIndex, ferr := os.ReadFile(c.filepath) + if ferr != nil { + fmt.Printf("[CompareIndexWriter] error reading index file: %v\n", ferr) + return fmt.Errorf("[CompareIndexWriter] error reading index file: %v", ferr) + } + memIndex := c.mem.f.buf.Bytes() + if !bytes.Equal(fileIndex, memIndex) { + fmt.Printf("[CompareIndexWriter] index files do not match\n") + return fmt.Errorf("[CompareIndexWriter] index files do not match") + } + return nil +} diff --git a/pkg/experiment/ingester/loki/index/index.go b/pkg/experiment/ingester/loki/index/index.go new file mode 100644 index 0000000000..9833d07216 --- /dev/null +++ b/pkg/experiment/ingester/loki/index/index.go @@ -0,0 +1,1999 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A tsdb index writer, that does not use files and mmap +// To be for tiny segments in v2 POC branch +// Inspired by loki https://raw.githubusercontent.com/grafana/loki/main/pkg/storage/wal/index/index.go +// But actually copied from pyroscope and modified accordingly + +package index + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "hash" + "hash/crc32" + "io" + "math" + "os" + "sort" + "unsafe" + + "github.com/grafana/pyroscope/pkg/phlaredb/tsdb/index" + + "github.com/pkg/errors" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" + tsdb_enc "github.com/prometheus/prometheus/tsdb/encoding" + + typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" + phlaremodel "github.com/grafana/pyroscope/pkg/model" + "github.com/grafana/pyroscope/pkg/phlaredb/block" + "github.com/grafana/pyroscope/pkg/phlaredb/tsdb/encoding" +) + +const ( + // MagicIndex 4 bytes at the head of an index file. + MagicIndex = 0xBAAAD700 + // HeaderLen represents number of bytes reserved of index for header. + HeaderLen = 5 + + // FormatV1 represents 1 version of index. + FormatV1 = 1 + // FormatV2 represents 2 version of index. 
+ FormatV2 = 2 + + IndexFilename = "index" + + // store every 1024 series' fingerprints in the fingerprint offsets table + fingerprintInterval = 1 << 10 + + SegmentsIndexWriterBufSize = 2 * 0x1000 // small for segments + BlocksIndexWriterBufSize = 1 << 22 // large for blocks +) + +type indexWriterStage uint8 + +const ( + idxStageNone indexWriterStage = iota + idxStageSymbols + idxStageSeries + idxStageDone +) + +func (s indexWriterStage) String() string { + switch s { + case idxStageNone: + return "none" + case idxStageSymbols: + return "symbols" + case idxStageSeries: + return "series" + case idxStageDone: + return "done" + } + return "" +} + +// The table gets initialized with sync.Once but may still cause a race +// with any other use of the crc32 package anywhere. Thus we initialize it +// before. +var castagnoliTable *crc32.Table + +func init() { + castagnoliTable = crc32.MakeTable(crc32.Castagnoli) +} + +// newCRC32 initializes a CRC32 hash with a preconfigured polynomial, so the +// polynomial may be easily changed in one location at a later time, if necessary. +func newCRC32() hash.Hash32 { + return crc32.New(castagnoliTable) +} + +type symbolCacheEntry struct { + index uint32 + lastValue string + lastValueIndex uint32 +} + +// Writer implements the IndexWriter interface for the standard +// serialization format. +type Writer struct { + ctx context.Context + + f *BufferWriter + + // Temporary file for postings. + fP *BufferWriter + // Temporary file for posting offsets table. + fPO *BufferWriter + cntPO uint64 + + toc TOC + stage indexWriterStage + postingsStart uint64 // Due to padding, can differ from TOC entry. + + // Reusable memory. + buf1 encoding.Encbuf + buf2 encoding.Encbuf + + numSymbols int + symbols *Symbols + symbolFile io.Closer + lastSymbol string + symbolCache map[string]symbolCacheEntry + + labelIndexes []labelIndexHashEntry // Label index offsets. + labelNames map[string]uint64 // Label names, and their usage. + // Keeps track of the fingerprint/offset for every n series + fingerprintOffsets index.FingerprintOffsets + + // Hold last series to validate that clients insert new series in order. + lastSeries phlaremodel.Labels + lastSeriesHash uint64 + lastRef storage.SeriesRef + + crc32 hash.Hash + + Version int +} + +// TOC represents index Table Of Content that states where each section of index starts. +type TOC struct { + Symbols uint64 + Series uint64 + LabelIndices uint64 + LabelIndicesTable uint64 + Postings uint64 + PostingsTable uint64 + FingerprintOffsets uint64 + Metadata Metadata +} + +// Metadata is TSDB-level metadata +type Metadata struct { + From, Through int64 + Checksum uint32 +} + +func (m *Metadata) EnsureBounds(from, through int64) { + if m.From == 0 || from < m.From { + m.From = from + } + + if m.Through == 0 || through > m.Through { + m.Through = through + } +} + +// NewTOCFromByteSlice return parsed TOC from given index byte slice. 
+func NewTOCFromByteSlice(bs ByteSlice) (*TOC, error) {
+ if bs.Len() < indexTOCLen {
+ return nil, tsdb_enc.ErrInvalidSize
+ }
+ b := bs.Range(bs.Len()-indexTOCLen, bs.Len())
+
+ expCRC := binary.BigEndian.Uint32(b[len(b)-4:])
+ d := encoding.DecWrap(tsdb_enc.Decbuf{B: b[:len(b)-4]})
+ if d.Crc32(castagnoliTable) != expCRC {
+ return nil, errors.Wrap(tsdb_enc.ErrInvalidChecksum, "read TOC")
+ }
+
+ if err := d.Err(); err != nil {
+ return nil, err
+ }
+
+ return &TOC{
+ Symbols: d.Be64(),
+ Series: d.Be64(),
+ LabelIndices: d.Be64(),
+ LabelIndicesTable: d.Be64(),
+ Postings: d.Be64(),
+ PostingsTable: d.Be64(),
+ FingerprintOffsets: d.Be64(),
+ Metadata: Metadata{
+ From: d.Be64int64(),
+ Through: d.Be64int64(),
+ Checksum: expCRC,
+ },
+ }, nil
+}
+
+// NewWriter returns a new in-memory index Writer backed by pooled buffers. It serializes data in format version 2.
+func NewWriter(ctx context.Context, bufferSize int) (*Writer, error) {
+ iw := &Writer{
+ ctx: ctx,
+ f: GetBufferWriterFromPool(),
+ fP: GetBufferWriterFromPool(),
+ fPO: GetBufferWriterFromPool(),
+ stage: idxStageNone,
+
+ // Reusable memory.
+ buf1: encoding.EncWrap(tsdb_enc.Encbuf{B: make([]byte, 0, bufferSize)}),
+ buf2: encoding.EncWrap(tsdb_enc.Encbuf{B: make([]byte, 0, bufferSize)}),
+
+ symbolCache: make(map[string]symbolCacheEntry, 1<<8),
+ labelNames: make(map[string]uint64, 1<<8),
+ crc32: newCRC32(),
+ }
+ if err := iw.writeMeta(); err != nil {
+ return nil, err
+ }
+ return iw, nil
+}
+
+func (w *Writer) write(bufs ...[]byte) error {
+ return w.f.Write(bufs...)
+}
+
+func (w *Writer) writeAt(buf []byte, pos uint64) error {
+ return w.f.WriteAt(buf, pos)
+}
+
+func (w *Writer) addPadding(size int) error {
+ return w.f.AddPadding(size)
+}
+
+// ensureStage handles transitions between write stages and ensures that IndexWriter
+// methods are called in an order valid for the implementation.
+func (w *Writer) ensureStage(s indexWriterStage) error {
+ select {
+ case <-w.ctx.Done():
+ return w.ctx.Err()
+ default:
+ }
+
+ if w.stage == s {
+ return nil
+ }
+ if w.stage < s-1 {
+ // A stage has been skipped.
+ if err := w.ensureStage(s - 1); err != nil {
+ return err
+ }
+ }
+ if w.stage > s {
+ return errors.Errorf("invalid stage %q, currently at %q", s, w.stage)
+ }
+
+ // Mark start of sections in table of contents.
+ switch s {
+ case idxStageSymbols:
+ w.toc.Symbols = w.f.pos
+ if err := w.startSymbols(); err != nil {
+ return err
+ }
+ case idxStageSeries:
+ if err := w.finishSymbols(); err != nil {
+ return err
+ }
+ w.toc.Series = w.f.pos
+
+ case idxStageDone:
+ w.toc.LabelIndices = w.f.pos
+ // LabelIndices generation depends on the posting offset
+ // table produced at this stage.
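+ // The remaining sections land in the order their offsets are recorded below:
+ // label indices, postings (staged in temporary buffers first), the label-index
+ // and postings offset tables, the fingerprint offsets table, and the TOC itself.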
+ if err := w.writePostingsToTmpFiles(); err != nil { + return err + } + if err := w.writeLabelIndices(); err != nil { + return err + } + + w.toc.Postings = w.f.pos + if err := w.writePostings(); err != nil { + return err + } + + w.toc.LabelIndicesTable = w.f.pos + if err := w.writeLabelIndexesOffsetTable(); err != nil { + return err + } + + w.toc.PostingsTable = w.f.pos + if err := w.writePostingsOffsetTable(); err != nil { + return err + } + + w.toc.FingerprintOffsets = w.f.pos + if err := w.writeFingerprintOffsetsTable(); err != nil { + return err + } + + if err := w.writeTOC(); err != nil { + return err + } + } + + w.stage = s + return nil +} + +func (w *Writer) writeMeta() error { + w.buf1.Reset() + w.buf1.PutBE32(MagicIndex) + w.buf1.PutByte(FormatV2) + + return w.write(w.buf1.Get()) +} + +// AddSeries adds the series one at a time along with its chunks. +// Requires a specific fingerprint to be passed in the case where the "desired" +// fingerprint differs from what labels.Hash() produces. For example, +// multitenant TSDBs embed a tenant label, but the actual series has no such +// label and so the derived fingerprint differs. +func (w *Writer) AddSeries(ref storage.SeriesRef, lset phlaremodel.Labels, fp model.Fingerprint, chunks ...index.ChunkMeta) error { + if err := w.ensureStage(idxStageSeries); err != nil { + return err + } + + // Put the supplied fingerprint instead of the calculated hash. + // This allows us to have a synthetic label (__loki_tenant__) in + // the pre-compacted TSDBs which map to fingerprints (and chunks) + // without this label in storage. + labelHash := uint64(fp) + + if ref < w.lastRef && len(w.lastSeries) != 0 { + return errors.Errorf("series with reference greater than %d already added", ref) + } + // We add padding to 16 bytes to increase the addressable space we get through 4 byte + // series references. 
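+ // With 16-byte alignment a 4-byte series reference addresses pos/16, so the
+ // series section can span up to 2^32*16 bytes (64 GiB); readers multiply the
+ // reference by 16 again to recover the byte offset (see Reader.Series).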
+ if err := w.addPadding(16); err != nil { + return errors.Errorf("failed to write padding bytes: %v", err) + } + + if w.f.pos%16 != 0 { + return errors.Errorf("series write not 16-byte aligned at %d", w.f.pos) + } + + w.buf2.Reset() + w.buf2.PutBE64(labelHash) + w.buf2.PutUvarint(len(lset)) + + for _, l := range lset { + var err error + cacheEntry, ok := w.symbolCache[l.Name] + nameIndex := cacheEntry.index + if !ok { + nameIndex, err = w.symbols.ReverseLookup(l.Name) + if err != nil { + return errors.Errorf("symbol entry for %q does not exist, %v", l.Name, err) + } + } + w.labelNames[l.Name]++ + w.buf2.PutUvarint32(nameIndex) + + valueIndex := cacheEntry.lastValueIndex + if !ok || cacheEntry.lastValue != l.Value { + valueIndex, err = w.symbols.ReverseLookup(l.Value) + if err != nil { + return errors.Errorf("symbol entry for %q does not exist, %v", l.Value, err) + } + w.symbolCache[l.Name] = symbolCacheEntry{ + index: nameIndex, + lastValue: l.Value, + lastValueIndex: valueIndex, + } + } + w.buf2.PutUvarint32(valueIndex) + } + + w.buf2.PutUvarint(len(chunks)) + + if len(chunks) > 0 { + c := chunks[0] + w.toc.Metadata.EnsureBounds(c.MinTime, c.MaxTime) + + w.buf2.PutVarint64(c.MinTime) + w.buf2.PutUvarint64(uint64(c.MaxTime - c.MinTime)) + w.buf2.PutUvarint32(c.KB) + w.buf2.PutUvarint32(c.SeriesIndex) + w.buf2.PutBE32(c.Checksum) + t0 := c.MaxTime + + for _, c := range chunks[1:] { + w.toc.Metadata.EnsureBounds(c.MinTime, c.MaxTime) + // Encode the diff against previous chunk as varint + // instead of uvarint because chunks may overlap + w.buf2.PutVarint64(c.MinTime - t0) + w.buf2.PutUvarint64(uint64(c.MaxTime - c.MinTime)) + w.buf2.PutUvarint32(c.KB) + w.buf2.PutUvarint32(c.SeriesIndex) + t0 = c.MaxTime + + w.buf2.PutBE32(c.Checksum) + } + } + + w.buf1.Reset() + w.buf1.PutUvarint(w.buf2.Len()) + + w.buf2.PutHash(w.crc32) + + w.lastSeries = append(w.lastSeries[:0], lset...) + w.lastSeriesHash = labelHash + w.lastRef = ref + + if ref%fingerprintInterval == 0 { + sRef := w.f.pos / 16 + w.fingerprintOffsets = append(w.fingerprintOffsets, [2]uint64{sRef, labelHash}) + } + + if err := w.write(w.buf1.Get(), w.buf2.Get()); err != nil { + return errors.Wrap(err, "write series data") + } + + return nil +} + +func (w *Writer) startSymbols() error { + // We are at w.toc.Symbols. + // Leave 4 bytes of space for the length, and another 4 for the number of symbols + // which will both be calculated later. + return w.write([]byte("alenblen")) +} + +func (w *Writer) AddSymbol(sym string) error { + if err := w.ensureStage(idxStageSymbols); err != nil { + return err + } + if w.numSymbols != 0 && sym <= w.lastSymbol { + return errors.Errorf("symbol %q out-of-order", sym) + } + w.lastSymbol = sym + w.numSymbols++ + w.buf1.Reset() + w.buf1.PutUvarintStr(sym) + return w.write(w.buf1.Get()) +} + +func (w *Writer) finishSymbols() error { + symbolTableSize := w.f.pos - w.toc.Symbols - 4 + // The symbol table's part is 4 bytes. So the total symbol table size must be less than or equal to 2^32-1 + if symbolTableSize > math.MaxUint32 { + return errors.Errorf("symbol table size exceeds 4 bytes: %d", symbolTableSize) + } + + // Write out the length and symbol count. + w.buf1.Reset() + w.buf1.PutBE32int(int(symbolTableSize)) + w.buf1.PutBE32int(w.numSymbols) + if err := w.writeAt(w.buf1.Get(), w.toc.Symbols); err != nil { + return err + } + + hashPos := w.f.pos + // Leave space for the hash. We can only calculate it + // now that the number of symbols is known, so mmap and do it from there. 
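+ // With the in-memory BufferWriter there is no file to mmap: a 4-byte
+ // placeholder is written, the accumulated buffer is borrowed via Buffer(),
+ // the symbol table checksum is computed from those bytes, the placeholder is
+ // patched with writeAt, and the Symbols lookup is built from the same buffer.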
+ if err := w.write([]byte("hash")); err != nil { + return err + } + if err := w.f.Flush(); err != nil { + return err + } + + //sf, err := fileutil.OpenMmapFile(w.f.name) + buf, sf, err := w.f.Buffer() + if err != nil { + return err + } + w.symbolFile = sf + hash := crc32.Checksum(buf[w.toc.Symbols+4:hashPos], castagnoliTable) + w.buf1.Reset() + w.buf1.PutBE32(hash) + if err := w.writeAt(w.buf1.Get(), hashPos); err != nil { + return err + } + + // Load in the symbol table efficiently for the rest of the index writing. + w.symbols, err = NewSymbols(RealByteSlice(buf), FormatV2, int(w.toc.Symbols)) + if err != nil { + return errors.Wrap(err, "read symbols") + } + return nil +} + +func (w *Writer) writeLabelIndices() error { + if err := w.fPO.Flush(); err != nil { + return err + } + + // Find all the label values in the tmp posting offset table. + //f, err := fileutil.OpenMmapFile(w.fPO.name) + buf, closer, err := w.fPO.Buffer() + if err != nil { + return err + } + defer closer.Close() + + d := encoding.DecWrap(tsdb_enc.NewDecbufRaw(RealByteSlice(buf), int(w.fPO.pos))) + cnt := w.cntPO + current := []byte{} + values := []uint32{} + for d.Err() == nil && cnt > 0 { + cnt-- + d.Uvarint() // Keycount. + name := d.UvarintBytes() // Label name. + value := yoloString(d.UvarintBytes()) // Label value. + d.Uvarint64() // Offset. + if len(name) == 0 { + continue // All index is ignored. + } + + if !bytes.Equal(name, current) && len(values) > 0 { + // We've reached a new label name. + if err := w.writeLabelIndex(string(current), values); err != nil { + return err + } + values = values[:0] + } + current = name + sid, err := w.symbols.ReverseLookup(value) + if err != nil { + return err + } + values = append(values, sid) + } + if d.Err() != nil { + return d.Err() + } + + // Handle the last label. + if len(values) > 0 { + if err := w.writeLabelIndex(string(current), values); err != nil { + return err + } + } + return nil +} + +func (w *Writer) writeLabelIndex(name string, values []uint32) error { + // Align beginning to 4 bytes for more efficient index list scans. + if err := w.addPadding(4); err != nil { + return err + } + + w.labelIndexes = append(w.labelIndexes, labelIndexHashEntry{ + keys: []string{name}, + offset: w.f.pos, + }) + + startPos := w.f.pos + // Leave 4 bytes of space for the length, which will be calculated later. + if err := w.write([]byte("alen")); err != nil { + return err + } + w.crc32.Reset() + + w.buf1.Reset() + w.buf1.PutBE32int(1) // Number of names. + w.buf1.PutBE32int(len(values)) + w.buf1.WriteToHash(w.crc32) + if err := w.write(w.buf1.Get()); err != nil { + return err + } + + for _, v := range values { + w.buf1.Reset() + w.buf1.PutBE32(v) + w.buf1.WriteToHash(w.crc32) + if err := w.write(w.buf1.Get()); err != nil { + return err + } + } + + // Write out the length. + w.buf1.Reset() + l := w.f.pos - startPos - 4 + if l > math.MaxUint32 { + return errors.Errorf("label index size exceeds 4 bytes: %d", l) + } + w.buf1.PutBE32int(int(l)) + if err := w.writeAt(w.buf1.Get(), startPos); err != nil { + return err + } + + w.buf1.Reset() + w.buf1.PutHashSum(w.crc32) + return w.write(w.buf1.Get()) +} + +// writeLabelIndexesOffsetTable writes the label indices offset table. +func (w *Writer) writeLabelIndexesOffsetTable() error { + startPos := w.f.pos + // Leave 4 bytes of space for the length, which will be calculated later. 
+ if err := w.write([]byte("alen")); err != nil { + return err + } + w.crc32.Reset() + + w.buf1.Reset() + w.buf1.PutBE32int(len(w.labelIndexes)) + w.buf1.WriteToHash(w.crc32) + if err := w.write(w.buf1.Get()); err != nil { + return err + } + + for _, e := range w.labelIndexes { + w.buf1.Reset() + w.buf1.PutUvarint(len(e.keys)) + for _, k := range e.keys { + w.buf1.PutUvarintStr(k) + } + w.buf1.PutUvarint64(e.offset) + w.buf1.WriteToHash(w.crc32) + if err := w.write(w.buf1.Get()); err != nil { + return err + } + } + // Write out the length. + w.buf1.Reset() + l := w.f.pos - startPos - 4 + if l > math.MaxUint32 { + return errors.Errorf("label indexes offset table size exceeds 4 bytes: %d", l) + } + w.buf1.PutBE32int(int(l)) + if err := w.writeAt(w.buf1.Get(), startPos); err != nil { + return err + } + + w.buf1.Reset() + w.buf1.PutHashSum(w.crc32) + return w.write(w.buf1.Get()) +} + +// writePostingsOffsetTable writes the postings offset table. +func (w *Writer) writePostingsOffsetTable() error { + // Ensure everything is in the temporary file. + if err := w.fPO.Flush(); err != nil { + return err + } + + startPos := w.f.pos + // Leave 4 bytes of space for the length, which will be calculated later. + if err := w.write([]byte("alen")); err != nil { + return err + } + + // Copy over the tmp posting offset table, however we need to + // adjust the offsets. + adjustment := w.postingsStart + + w.buf1.Reset() + w.crc32.Reset() + w.buf1.PutBE32int(int(w.cntPO)) // Count. + w.buf1.WriteToHash(w.crc32) + if err := w.write(w.buf1.Get()); err != nil { + return err + } + + //f, err := fileutil.OpenMmapFile(w.fPO.name) + buf, closer, err := w.fPO.Buffer() + if err != nil { + return err + } + defer func() { + if closer != nil { + closer.Close() + } + }() + d := encoding.DecWrap(tsdb_enc.NewDecbufRaw(RealByteSlice(buf), int(w.fPO.pos))) + cnt := w.cntPO + for d.Err() == nil && cnt > 0 { + w.buf1.Reset() + w.buf1.PutUvarint(d.Uvarint()) // Keycount. + w.buf1.PutUvarintStr(yoloString(d.UvarintBytes())) // Label name. + w.buf1.PutUvarintStr(yoloString(d.UvarintBytes())) // Label value. + w.buf1.PutUvarint64(d.Uvarint64() + adjustment) // Offset. + w.buf1.WriteToHash(w.crc32) + if err := w.write(w.buf1.Get()); err != nil { + return err + } + cnt-- + } + if d.Err() != nil { + return d.Err() + } + + // Cleanup temporary file. + //if err := f.Close(); err != nil { + // return err + //} + //f = nil + if err := w.fPO.Close(); err != nil { + return err + } + if err := w.fPO.Remove(); err != nil { + return err + } + //w.fPO = nil + + // Write out the length. + w.buf1.Reset() + l := w.f.pos - startPos - 4 + if l > math.MaxUint32 { + return errors.Errorf("postings offset table size exceeds 4 bytes: %d", l) + } + w.buf1.PutBE32int(int(l)) + if err := w.writeAt(w.buf1.Get(), startPos); err != nil { + return err + } + + // Finally write the hash. + w.buf1.Reset() + w.buf1.PutHashSum(w.crc32) + return w.write(w.buf1.Get()) +} + +func (w *Writer) writeFingerprintOffsetsTable() error { + w.buf1.Reset() + w.buf2.Reset() + + w.buf1.PutBE32int(len(w.fingerprintOffsets)) // Count. + // build offsets + for _, x := range w.fingerprintOffsets { + w.buf1.PutBE64(x[0]) // series offset + w.buf1.PutBE64(x[1]) // hash + } + + // write length + ln := w.buf1.Len() + // TODO(owen-d): can remove the uint32 cast in the future + // Had to uint32 wrap these for arm32 builds, which we'll remove in the future. 
+ if uint32(ln) > uint32(math.MaxUint32) { + return errors.Errorf("fingerprint offset size exceeds 4 bytes: %d", ln) + } + + w.buf2.PutBE32int(ln) + if err := w.write(w.buf2.Get()); err != nil { + return err + } + + // write offsets+checksum + w.buf1.PutHash(w.crc32) + if err := w.write(w.buf1.Get()); err != nil { + return errors.Wrap(err, "failure writing fingerprint offsets") + } + return nil +} + +const indexTOCLen = 8*9 + crc32.Size + +func (w *Writer) writeTOC() error { + w.buf1.Reset() + + w.buf1.PutBE64(w.toc.Symbols) + w.buf1.PutBE64(w.toc.Series) + w.buf1.PutBE64(w.toc.LabelIndices) + w.buf1.PutBE64(w.toc.LabelIndicesTable) + w.buf1.PutBE64(w.toc.Postings) + w.buf1.PutBE64(w.toc.PostingsTable) + w.buf1.PutBE64(w.toc.FingerprintOffsets) + + // metadata + w.buf1.PutBE64int64(w.toc.Metadata.From) + w.buf1.PutBE64int64(w.toc.Metadata.Through) + + w.buf1.PutHash(w.crc32) + + return w.write(w.buf1.Get()) +} + +func (w *Writer) writePostingsToTmpFiles() error { + names := make([]string, 0, len(w.labelNames)) + for n := range w.labelNames { + names = append(names, n) + } + sort.Strings(names) + + if err := w.f.Flush(); err != nil { + return err + } + //f, err := fileutil.OpenMmapFile(w.f.name) + buf, closer, err := w.f.Buffer() + if err != nil { + return err + } + defer closer.Close() + + // Write out the special all posting. + offsets := []uint32{} + d := encoding.DecWrap(tsdb_enc.NewDecbufRaw(RealByteSlice(buf), int(w.toc.LabelIndices))) + d.Skip(int(w.toc.Series)) + for d.Len() > 0 { + d.ConsumePadding() + startPos := w.toc.LabelIndices - uint64(d.Len()) + if startPos%16 != 0 { + return errors.Errorf("series not 16-byte aligned at %d", startPos) + } + offsets = append(offsets, uint32(startPos/16)) + // Skip to next series. + x := d.Uvarint() + d.Skip(x + crc32.Size) + if err := d.Err(); err != nil { + return err + } + } + if err := w.writePosting("", "", offsets); err != nil { + return err + } + maxPostings := uint64(len(offsets)) // No label name can have more postings than this. + + for len(names) > 0 { + batchNames := []string{} + var c uint64 + // Try to bunch up label names into one loop, but avoid + // using more memory than a single label name can. + for len(names) > 0 { + if w.labelNames[names[0]]+c > maxPostings { + if c > 0 { + break + } + return fmt.Errorf("corruption detected when writing postings to index: label %q has %d uses, but maxPostings is %d", names[0], w.labelNames[names[0]], maxPostings) + } + batchNames = append(batchNames, names[0]) + c += w.labelNames[names[0]] + names = names[1:] + } + + nameSymbols := map[uint32]string{} + for _, name := range batchNames { + sid, err := w.symbols.ReverseLookup(name) + if err != nil { + return err + } + nameSymbols[sid] = name + } + // Label name -> label value -> positions. + postings := map[uint32]map[uint32][]uint32{} + + d := encoding.DecWrap(tsdb_enc.NewDecbufRaw(RealByteSlice(buf), int(w.toc.LabelIndices))) + d.Skip(int(w.toc.Series)) + for d.Len() > 0 { + d.ConsumePadding() + startPos := w.toc.LabelIndices - uint64(d.Len()) + l := d.Uvarint() // Length of this series in bytes. + startLen := d.Len() + + _ = d.Be64() // skip fingerprint + // See if label names we want are in the series. 
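+ // Each entry was laid out by AddSeries as: length uvarint, fingerprint
+ // (8 bytes), label count, then (name symbol, value symbol) uvarint pairs,
+ // followed by the chunk metas and a CRC32; only the label pairs are needed here.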
+ numLabels := d.Uvarint() + for i := 0; i < numLabels; i++ { + lno := uint32(d.Uvarint()) + lvo := uint32(d.Uvarint()) + + if _, ok := nameSymbols[lno]; ok { + if _, ok := postings[lno]; !ok { + postings[lno] = map[uint32][]uint32{} + } + postings[lno][lvo] = append(postings[lno][lvo], uint32(startPos/16)) + } + } + // Skip to next series. + d.Skip(l - (startLen - d.Len()) + crc32.Size) + if err := d.Err(); err != nil { + return err + } + } + + for _, name := range batchNames { + // Write out postings for this label name. + sid, err := w.symbols.ReverseLookup(name) + if err != nil { + return err + } + values := make([]uint32, 0, len(postings[sid])) + for v := range postings[sid] { + values = append(values, v) + } + // Symbol numbers are in order, so the strings will also be in order. + sort.Sort(uint32slice(values)) + for _, v := range values { + value, err := w.symbols.Lookup(v) + if err != nil { + return err + } + if err := w.writePosting(name, value, postings[sid][v]); err != nil { + return err + } + } + } + select { + case <-w.ctx.Done(): + return w.ctx.Err() + default: + } + } + return nil +} + +func (w *Writer) writePosting(name, value string, offs []uint32) error { + // Align beginning to 4 bytes for more efficient postings list scans. + if err := w.fP.AddPadding(4); err != nil { + return err + } + + // Write out postings offset table to temporary file as we go. + w.buf1.Reset() + w.buf1.PutUvarint(2) + w.buf1.PutUvarintStr(name) + w.buf1.PutUvarintStr(value) + w.buf1.PutUvarint64(w.fP.pos) // This is relative to the postings tmp file, not the final index file. + if err := w.fPO.Write(w.buf1.Get()); err != nil { + return err + } + w.cntPO++ + + w.buf1.Reset() + w.buf1.PutBE32int(len(offs)) + + for _, off := range offs { + if off > (1<<32)-1 { + return errors.Errorf("series offset %d exceeds 4 bytes", off) + } + w.buf1.PutBE32(off) + } + + w.buf2.Reset() + l := w.buf1.Len() + // We convert to uint to make code compile on 32-bit systems, as math.MaxUint32 doesn't fit into int there. + if uint(l) > math.MaxUint32 { + return errors.Errorf("posting size exceeds 4 bytes: %d", l) + } + w.buf2.PutBE32int(l) + w.buf1.PutHash(w.crc32) + return w.fP.Write(w.buf2.Get(), w.buf1.Get()) +} + +func (w *Writer) writePostings() error { + // There's padding in the tmp file, make sure it actually works. + if err := w.f.AddPadding(4); err != nil { + return err + } + w.postingsStart = w.f.pos + + // Copy temporary file into main index. + if err := w.fP.Flush(); err != nil { + return err + } + //if _, err := w.fP.f.Seek(0, 0); err != nil { + // return err + //} + // Don't need to calculate a checksum, so can copy directly. 
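+ // The buffer-backed writer appends the postings with ReadFrom below instead
+ // of io.CopyBuffer between files; the commented-out file/seek variant from
+ // the file-based implementation is left in place.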
+ //n, err := io.CopyBuffer(w.f.fbuf, w.fP.f, make([]byte, 1<<20)) + //buf := make([]byte, cap(w.buf1.B)) + //buf := w.buf1.B[:cap(w.buf1.B)] + //n, err := io.CopyBuffer(w.f.fbuf, w.fP.f, buf) + //if err != nil { + // return err + //} + n, err := w.f.ReadFrom(w.fP) + if err != nil { + return err + } + if uint64(n) != w.fP.pos { + return errors.Errorf("wrote %d bytes to posting temporary file, but only read back %d", w.fP.pos, n) + } + //w.f.pos += uint64(n) + + if err := w.fP.Close(); err != nil { + return err + } + if err := w.fP.Remove(); err != nil { + return err + } + //w.fP = nil + return nil +} + +type uint32slice []uint32 + +func (s uint32slice) Len() int { return len(s) } +func (s uint32slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s uint32slice) Less(i, j int) bool { return s[i] < s[j] } + +type labelIndexHashEntry struct { + keys []string + offset uint64 +} + +func (w *Writer) Close() error { + // Even if this fails, we need to close all the files. + ensureErr := w.ensureStage(idxStageDone) + + if w.symbolFile != nil { + if err := w.symbolFile.Close(); err != nil { + return err + } + } + if w.fP != nil { + if err := w.fP.Close(); err != nil { + return err + } + } + if w.fPO != nil { + if err := w.fPO.Close(); err != nil { + return err + } + } + if err := w.f.Close(); err != nil { + return err + } + // w.f is kept around a bit longer and returned to pool by users + PutBufferWriterToPool(w.fP) + PutBufferWriterToPool(w.fPO) + w.fP = nil + w.fPO = nil + + return ensureErr +} + +// StringIter iterates over a sorted list of strings. +type StringIter interface { + // Next advances the iterator and returns true if another value was found. + Next() bool + + // At returns the value at the current iterator position. + At() string + + // Err returns the last error of the iterator. + Err() error +} + +type Reader struct { + b ByteSlice + toc *TOC + + // Close that releases the underlying resources of the byte slice. + c io.Closer + + // Map of LabelName to a list of some LabelValues's position in the offset table. + // The first and last values for each name are always present. + postings map[string][]postingOffset + // For the v1 format, labelname -> labelvalue -> offset. + postingsV1 map[string]map[string]uint64 + + symbols *Symbols + nameSymbols map[uint32]string // Cache of the label name symbol lookups, + // as there are not many and they are half of all lookups. + + fingerprintOffsets index.FingerprintOffsets + + dec *Decoder + + version int +} + +type postingOffset struct { + value string + off int +} + +// ByteSlice abstracts a byte slice. +type ByteSlice interface { + Len() int + Range(start, end int) []byte +} + +type RealByteSlice []byte + +func (b RealByteSlice) Len() int { + return len(b) +} + +func (b RealByteSlice) Range(start, end int) []byte { + return b[start:end] +} + +func (b RealByteSlice) Sub(start, end int) ByteSlice { + return b[start:end] +} + +// NewReader returns a new index reader on the given byte slice. It automatically +// handles different format versions. +func NewReader(b ByteSlice) (*Reader, error) { + return newReader(b, io.NopCloser(nil)) +} + +type nopCloser struct{} + +func (nopCloser) Close() error { return nil } + +// NewFileReader returns a new index reader against the given index file. 
+func NewFileReader(path string) (*Reader, error) { + b, err := os.ReadFile(path) + if err != nil { + return nil, err + } + r, err := newReader(RealByteSlice(b), nopCloser{}) + if err != nil { + return r, err + } + + return r, nil +} + +func newReader(b ByteSlice, c io.Closer) (*Reader, error) { + r := &Reader{ + b: b, + c: c, + postings: map[string][]postingOffset{}, + } + + // Verify header. + if r.b.Len() < HeaderLen { + return nil, errors.Wrap(tsdb_enc.ErrInvalidSize, "index header") + } + if m := binary.BigEndian.Uint32(r.b.Range(0, 4)); m != MagicIndex { + return nil, errors.Errorf("invalid magic number %x", m) + } + r.version = int(r.b.Range(4, 5)[0]) + + if r.version != FormatV1 && r.version != FormatV2 { + return nil, errors.Errorf("unknown index file version %d", r.version) + } + + var err error + r.toc, err = NewTOCFromByteSlice(b) + if err != nil { + return nil, errors.Wrap(err, "read TOC") + } + + r.symbols, err = NewSymbols(r.b, r.version, int(r.toc.Symbols)) + if err != nil { + return nil, errors.Wrap(err, "read symbols") + } + + if r.version == FormatV1 { + // Earlier V1 formats don't have a sorted postings offset table, so + // load the whole offset table into memory. + r.postingsV1 = map[string]map[string]uint64{} + if err := ReadOffsetTable(r.b, r.toc.PostingsTable, func(key []string, off uint64, _ int) error { + if len(key) != 2 { + return errors.Errorf("unexpected key length for posting table %d", len(key)) + } + if _, ok := r.postingsV1[key[0]]; !ok { + r.postingsV1[key[0]] = map[string]uint64{} + r.postings[key[0]] = nil // Used to get a list of labelnames in places. + } + r.postingsV1[key[0]][key[1]] = off + return nil + }); err != nil { + return nil, errors.Wrap(err, "read postings table") + } + } else { + var lastKey []string + lastOff := 0 + valueCount := 0 + // For the postings offset table we keep every label name but only every nth + // label value (plus the first and last one), to save memory. + if err := ReadOffsetTable(r.b, r.toc.PostingsTable, func(key []string, _ uint64, off int) error { + if len(key) != 2 { + return errors.Errorf("unexpected key length for posting table %d", len(key)) + } + if _, ok := r.postings[key[0]]; !ok { + // Next label name. + r.postings[key[0]] = []postingOffset{} + if lastKey != nil { + // Always include last value for each label name. + r.postings[lastKey[0]] = append(r.postings[lastKey[0]], postingOffset{value: lastKey[1], off: lastOff}) + } + lastKey = nil + valueCount = 0 + } + if valueCount%symbolFactor == 0 { + r.postings[key[0]] = append(r.postings[key[0]], postingOffset{value: key[1], off: off}) + lastKey = nil + } else { + lastKey = key + lastOff = off + } + valueCount++ + return nil + }); err != nil { + return nil, errors.Wrap(err, "read postings table") + } + if lastKey != nil { + r.postings[lastKey[0]] = append(r.postings[lastKey[0]], postingOffset{value: lastKey[1], off: lastOff}) + } + // Trim any extra space in the slices. 
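+ // Copy into exactly-sized slices to drop the spare capacity accumulated by
+ // the appends above; these offset slices live for the lifetime of the Reader.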
+ for k, v := range r.postings { + l := make([]postingOffset, len(v)) + copy(l, v) + r.postings[k] = l + } + } + + r.nameSymbols = make(map[uint32]string, len(r.postings)) + for k := range r.postings { + if k == "" { + continue + } + off, err := r.symbols.ReverseLookup(k) + if err != nil { + return nil, errors.Wrap(err, "reverse symbol lookup") + } + r.nameSymbols[off] = k + } + + r.fingerprintOffsets, err = readFingerprintOffsetsTable(r.b, r.toc.FingerprintOffsets) + if err != nil { + return nil, errors.Wrap(err, "loading fingerprint offsets") + } + + r.dec = &Decoder{LookupSymbol: r.lookupSymbol} + + return r, nil +} + +// Version returns the file format version of the underlying index. +func (r *Reader) Version() int { + return r.version +} + +// FileInfo returns some general stats about the underlying file +func (r *Reader) FileInfo() block.File { + k, v := index.AllPostingsKey() + postings, err := r.Postings(k, nil, v) + if err != nil { + panic(err) + } + var numSeries uint64 + for postings.Next() { + numSeries++ + } + return block.File{ + RelPath: block.IndexFilename, + SizeBytes: uint64(r.Size()), + TSDB: &block.TSDBFile{ + NumSeries: numSeries, + }, + } +} + +// Range marks a byte range. +type Range struct { + Start, End int64 +} + +// PostingsRanges returns a new map of byte range in the underlying index file +// for all postings lists. +func (r *Reader) PostingsRanges() (map[labels.Label]Range, error) { + m := map[labels.Label]Range{} + if err := ReadOffsetTable(r.b, r.toc.PostingsTable, func(key []string, off uint64, _ int) error { + if len(key) != 2 { + return errors.Errorf("unexpected key length for posting table %d", len(key)) + } + d := encoding.DecWrap(tsdb_enc.NewDecbufAt(r.b, int(off), castagnoliTable)) + if d.Err() != nil { + return d.Err() + } + m[labels.Label{Name: key[0], Value: key[1]}] = Range{ + Start: int64(off) + 4, + End: int64(off) + 4 + int64(d.Len()), + } + return nil + }); err != nil { + return nil, errors.Wrap(err, "read postings table") + } + return m, nil +} + +type Symbols struct { + bs ByteSlice + version int + off int + + offsets []int + seen int +} + +const symbolFactor = 32 + +// NewSymbols returns a Symbols object for symbol lookups. +func NewSymbols(bs ByteSlice, version, off int) (*Symbols, error) { + s := &Symbols{ + bs: bs, + version: version, + off: off, + } + d := encoding.DecWrap(tsdb_enc.NewDecbufAt(bs, off, castagnoliTable)) + var ( + origLen = d.Len() + cnt = d.Be32int() + basePos = off + 4 + ) + s.offsets = make([]int, 0, 1+cnt/symbolFactor) + for d.Err() == nil && s.seen < cnt { + if s.seen%symbolFactor == 0 { + s.offsets = append(s.offsets, basePos+origLen-d.Len()) + } + d.UvarintBytes() // The symbol. + s.seen++ + } + if d.Err() != nil { + return nil, d.Err() + } + return s, nil +} + +func (s Symbols) Lookup(o uint32) (string, error) { + d := encoding.DecWrap(tsdb_enc.Decbuf{ + B: s.bs.Range(0, s.bs.Len()), + }) + + if s.version == FormatV2 { + if int(o) >= s.seen { + return "", errors.Errorf("unknown symbol offset %d", o) + } + d.Skip(s.offsets[int(o/symbolFactor)]) + // Walk until we find the one we want. 
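+ // Only every symbolFactor-th (32nd) symbol offset is sampled, so e.g. symbol
+ // 70 jumps to the sampled entry for symbol 64 and then skips 6 entries forward.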
+ for i := o - (o / symbolFactor * symbolFactor); i > 0; i-- { + d.UvarintBytes() + } + } else { + d.Skip(int(o)) + } + sym := d.UvarintStr() + if d.Err() != nil { + return "", d.Err() + } + return sym, nil +} + +func (s Symbols) ReverseLookup(sym string) (uint32, error) { + if len(s.offsets) == 0 { + return 0, errors.Errorf("unknown symbol %q - no symbols", sym) + } + i := sort.Search(len(s.offsets), func(i int) bool { + // Any decoding errors here will be lost, however + // we already read through all of this at startup. + d := encoding.DecWrap(tsdb_enc.Decbuf{ + B: s.bs.Range(0, s.bs.Len()), + }) + d.Skip(s.offsets[i]) + return yoloString(d.UvarintBytes()) > sym + }) + d := encoding.DecWrap(tsdb_enc.Decbuf{ + B: s.bs.Range(0, s.bs.Len()), + }) + if i > 0 { + i-- + } + d.Skip(s.offsets[i]) + res := i * symbolFactor + var lastLen int + var lastSymbol string + for d.Err() == nil && res <= s.seen { + lastLen = d.Len() + lastSymbol = yoloString(d.UvarintBytes()) + if lastSymbol >= sym { + break + } + res++ + } + if d.Err() != nil { + return 0, d.Err() + } + if lastSymbol != sym { + return 0, errors.Errorf("unknown symbol %q", sym) + } + if s.version == FormatV2 { + return uint32(res), nil + } + return uint32(s.bs.Len() - lastLen), nil +} + +func (s Symbols) Size() int { + return len(s.offsets) * 8 +} + +func (s Symbols) Iter() StringIter { + d := encoding.DecWrap(tsdb_enc.NewDecbufAt(s.bs, s.off, castagnoliTable)) + cnt := d.Be32int() + return &symbolsIter{ + d: d, + cnt: cnt, + } +} + +// symbolsIter implements StringIter. +type symbolsIter struct { + d encoding.Decbuf + cnt int + cur string + err error +} + +func (s *symbolsIter) Next() bool { + if s.cnt == 0 || s.err != nil { + return false + } + s.cur = yoloString(s.d.UvarintBytes()) + s.cnt-- + if s.d.Err() != nil { + s.err = s.d.Err() + return false + } + return true +} + +func (s symbolsIter) At() string { return s.cur } +func (s symbolsIter) Err() error { return s.err } + +// ReadOffsetTable reads an offset table and at the given position calls f for each +// found entry. If f returns an error it stops decoding and returns the received error. +func ReadOffsetTable(bs ByteSlice, off uint64, f func([]string, uint64, int) error) error { + d := encoding.DecWrap(tsdb_enc.NewDecbufAt(bs, int(off), castagnoliTable)) + startLen := d.Len() + cnt := d.Be32() + + for d.Err() == nil && d.Len() > 0 && cnt > 0 { + offsetPos := startLen - d.Len() + keyCount := d.Uvarint() + // The Postings offset table takes only 2 keys per entry (name and value of label), + // and the LabelIndices offset table takes only 1 key per entry (a label name). + // Hence setting the size to max of both, i.e. 2. + keys := make([]string, 0, 2) + + for i := 0; i < keyCount; i++ { + keys = append(keys, d.UvarintStr()) + } + o := d.Uvarint64() + if d.Err() != nil { + break + } + if err := f(keys, o, offsetPos); err != nil { + return err + } + cnt-- + } + return d.Err() +} + +func readFingerprintOffsetsTable(bs ByteSlice, off uint64) (index.FingerprintOffsets, error) { + d := encoding.DecWrap(tsdb_enc.NewDecbufAt(bs, int(off), castagnoliTable)) + cnt := d.Be32() + res := make(index.FingerprintOffsets, 0, int(cnt)) + + for d.Err() == nil && d.Len() > 0 && cnt > 0 { + res = append(res, [2]uint64{d.Be64(), d.Be64()}) + cnt-- + } + + return res, d.Err() +} + +// Close the reader and its underlying resources. 
+func (r *Reader) Close() error { + return r.c.Close() +} + +func (r *Reader) lookupSymbol(o uint32) (string, error) { + if s, ok := r.nameSymbols[o]; ok { + return s, nil + } + return r.symbols.Lookup(o) +} + +func (r *Reader) Bounds() (int64, int64) { + return r.toc.Metadata.From, r.toc.Metadata.Through +} + +func (r *Reader) Checksum() uint32 { + return r.toc.Metadata.Checksum +} + +// Symbols returns an iterator over the symbols that exist within the index. +func (r *Reader) Symbols() StringIter { + return r.symbols.Iter() +} + +// SymbolTableSize returns the symbol table size in bytes. +func (r *Reader) SymbolTableSize() uint64 { + return uint64(r.symbols.Size()) +} + +// SortedLabelValues returns value tuples that exist for the given label name. +// It is not safe to use the return value beyond the lifetime of the byte slice +// passed into the Reader. +func (r *Reader) SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { + values, err := r.LabelValues(name, matchers...) + if err == nil && r.version == FormatV1 { + sort.Strings(values) + } + return values, err +} + +// LabelValues returns value tuples that exist for the given label name. +// It is not safe to use the return value beyond the lifetime of the byte slice +// passed into the Reader. +// TODO(replay): Support filtering by matchers +func (r *Reader) LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { + if len(matchers) > 0 { + return nil, errors.Errorf("matchers parameter is not implemented: %+v", matchers) + } + + if r.version == FormatV1 { + e, ok := r.postingsV1[name] + if !ok { + return nil, nil + } + values := make([]string, 0, len(e)) + for k := range e { + values = append(values, k) + } + return values, nil + + } + e, ok := r.postings[name] + if !ok { + return nil, nil + } + if len(e) == 0 { + return nil, nil + } + values := make([]string, 0, len(e)*symbolFactor) + + d := encoding.DecWrap(tsdb_enc.NewDecbufAt(r.b, int(r.toc.PostingsTable), nil)) + d.Skip(e[0].off) + lastVal := e[len(e)-1].value + + skip := 0 + for d.Err() == nil { + if skip == 0 { + // These are always the same number of bytes, + // and it's faster to skip than parse. + skip = d.Len() + d.Uvarint() // Keycount. + d.UvarintBytes() // Label name. + skip -= d.Len() + } else { + d.Skip(skip) + } + s := yoloString(d.UvarintBytes()) // Label value. + values = append(values, s) + if s == lastVal { + break + } + d.Uvarint64() // Offset. + } + if d.Err() != nil { + return nil, errors.Wrap(d.Err(), "get postings offset entry") + } + return values, nil +} + +// LabelNamesFor returns all the label names for the series referred to by IDs. +// The names returned are sorted. +func (r *Reader) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) { + // Gather offsetsMap the name offsetsMap in the symbol table first + offsetsMap := make(map[uint32]struct{}) + for _, id := range ids { + offset := id + // In version 2 series IDs are no longer exact references but series are 16-byte padded + // and the ID is the multiple of 16 of the actual position. 
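+ // For example, a series written at byte offset 1600 is referenced as 100;
+ // multiplying by 16 recovers the position the decoder expects.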
+ if r.version == FormatV2 { + offset = id * 16 + } + + d := encoding.DecWrap(tsdb_enc.NewDecbufUvarintAt(r.b, int(offset), castagnoliTable)) + buf := d.Get() + if d.Err() != nil { + return nil, errors.Wrap(d.Err(), "get buffer for series") + } + + offsets, err := r.dec.LabelNamesOffsetsFor(buf) + if err != nil { + return nil, errors.Wrap(err, "get label name offsets") + } + for _, off := range offsets { + offsetsMap[off] = struct{}{} + } + } + + // Lookup the unique symbols. + names := make([]string, 0, len(offsetsMap)) + for off := range offsetsMap { + name, err := r.lookupSymbol(off) + if err != nil { + return nil, errors.Wrap(err, "lookup symbol in LabelNamesFor") + } + names = append(names, name) + } + + sort.Strings(names) + + return names, nil +} + +// LabelValueFor returns label value for the given label name in the series referred to by ID. +func (r *Reader) LabelValueFor(id storage.SeriesRef, label string) (string, error) { + offset := id + // In version 2 series IDs are no longer exact references but series are 16-byte padded + // and the ID is the multiple of 16 of the actual position. + if r.version == FormatV2 { + offset = id * 16 + } + d := encoding.DecWrap(tsdb_enc.NewDecbufUvarintAt(r.b, int(offset), castagnoliTable)) + buf := d.Get() + if d.Err() != nil { + return "", errors.Wrap(d.Err(), "label values for") + } + + value, err := r.dec.LabelValueFor(buf, label) + if err != nil { + return "", storage.ErrNotFound + } + + if value == "" { + return "", storage.ErrNotFound + } + + return value, nil +} + +// Series reads the series with the given ID and writes its labels and chunks into lbls and chks. +func (r *Reader) Series(id storage.SeriesRef, lbls *phlaremodel.Labels, chks *[]index.ChunkMeta) (uint64, error) { + offset := id + // In version 2 series IDs are no longer exact references but series are 16-byte padded + // and the ID is the multiple of 16 of the actual position. + if r.version == FormatV2 { + offset = id * 16 + } + d := encoding.DecWrap(tsdb_enc.NewDecbufUvarintAt(r.b, int(offset), castagnoliTable)) + if d.Err() != nil { + return 0, d.Err() + } + + fprint, err := r.dec.Series(d.Get(), lbls, chks, false) + if err != nil { + return 0, errors.Wrap(err, "read series") + } + return fprint, nil +} + +// SeriesBy is like Series but allows to group labels by name. This avoid looking up all label symbols for requested series. +func (r *Reader) SeriesBy(id storage.SeriesRef, lbls *phlaremodel.Labels, chks *[]index.ChunkMeta, by ...string) (uint64, error) { + offset := id + // In version 2 series IDs are no longer exact references but series are 16-byte padded + // and the ID is the multiple of 16 of the actual position. + if r.version == FormatV2 { + offset = id * 16 + } + d := encoding.DecWrap(tsdb_enc.NewDecbufUvarintAt(r.b, int(offset), castagnoliTable)) + if d.Err() != nil { + return 0, d.Err() + } + + fprint, err := r.dec.Series(d.Get(), lbls, chks, true, by...) + if err != nil { + return 0, errors.Wrap(err, "read series") + } + return fprint, nil +} + +func (r *Reader) Postings(name string, shard *index.ShardAnnotation, values ...string) (index.Postings, error) { + if r.version == FormatV1 { + e, ok := r.postingsV1[name] + if !ok { + return index.EmptyPostings(), nil + } + res := make([]index.Postings, 0, len(values)) + for _, v := range values { + postingsOff, ok := e[v] + if !ok { + continue + } + // Read from the postings table. 
+ d := encoding.DecWrap(tsdb_enc.NewDecbufAt(r.b, int(postingsOff), castagnoliTable)) + _, p, err := r.dec.Postings(d.Get()) + if err != nil { + return nil, errors.Wrap(err, "decode postings") + } + res = append(res, p) + } + return index.Merge(res...), nil + } + + e, ok := r.postings[name] + if !ok { + return index.EmptyPostings(), nil + } + + if len(values) == 0 { + return index.EmptyPostings(), nil + } + + res := make([]index.Postings, 0, len(values)) + skip := 0 + valueIndex := 0 + for valueIndex < len(values) && values[valueIndex] < e[0].value { + // Discard values before the start. + valueIndex++ + } + for valueIndex < len(values) { + value := values[valueIndex] + + i := sort.Search(len(e), func(i int) bool { return e[i].value >= value }) + if i == len(e) { + // We're past the end. + break + } + if i > 0 && e[i].value != value { + // Need to look from previous entry. + i-- + } + // Don't Crc32 the entire postings offset table, this is very slow + // so hope any issues were caught at startup. + d := encoding.DecWrap(tsdb_enc.NewDecbufAt(r.b, int(r.toc.PostingsTable), nil)) + d.Skip(e[i].off) + + // Iterate on the offset table. + var postingsOff uint64 // The offset into the postings table. + for d.Err() == nil { + if skip == 0 { + // These are always the same number of bytes, + // and it's faster to skip than parse. + skip = d.Len() + d.Uvarint() // Keycount. + d.UvarintBytes() // Label name. + skip -= d.Len() + } else { + d.Skip(skip) + } + v := d.UvarintBytes() // Label value. + postingsOff = d.Uvarint64() // Offset. + for string(v) >= value { + if string(v) == value { + // Read from the postings table. + d2 := encoding.DecWrap(tsdb_enc.NewDecbufAt(r.b, int(postingsOff), castagnoliTable)) + _, p, err := r.dec.Postings(d2.Get()) + if err != nil { + return nil, errors.Wrap(err, "decode postings") + } + res = append(res, p) + } + valueIndex++ + if valueIndex == len(values) { + break + } + value = values[valueIndex] + } + if i+1 == len(e) || value >= e[i+1].value || valueIndex == len(values) { + // Need to go to a later postings offset entry, if there is one. + break + } + } + if d.Err() != nil { + return nil, errors.Wrap(d.Err(), "get postings offset entry") + } + } + + merged := index.Merge(res...) + if shard != nil { + return index.NewShardedPostings(merged, *shard, r.fingerprintOffsets), nil + } + + return merged, nil +} + +// Size returns the size of an index file. +func (r *Reader) Size() int64 { + return int64(r.b.Len()) +} + +// LabelNames returns all the unique label names present in the index. +// TODO(twilkie) implement support for matchers +func (r *Reader) LabelNames(matchers ...*labels.Matcher) ([]string, error) { + if len(matchers) > 0 { + return nil, errors.Errorf("matchers parameter is not implemented: %+v", matchers) + } + + labelNames := make([]string, 0, len(r.postings)) + allPostingsKeyName, _ := index.AllPostingsKey() + for name := range r.postings { + //if name == index.allPostingsKey.Name { + if name == allPostingsKeyName { + // This is not from any metric. + continue + } + labelNames = append(labelNames, name) + } + sort.Strings(labelNames) + return labelNames, nil +} + +// Decoder provides decoding methods for the v1 and v2 index file format. +// +// It currently does not contain decoding methods for all entry types but can be extended +// by them if there's demand. +type Decoder struct { + LookupSymbol func(uint32) (string, error) +} + +// Postings returns a postings list for b and its number of elements. 
+func (dec *Decoder) Postings(b []byte) (int, index.Postings, error) { + d := encoding.DecWrap(tsdb_enc.Decbuf{B: b}) + n := d.Be32int() + l := d.Get() + if d.Err() != nil { + return 0, nil, d.Err() + } + if len(l) != 4*n { + return 0, nil, fmt.Errorf("unexpected postings length, should be %d bytes for %d postings, got %d bytes", 4*n, n, len(l)) + } + return n, index.NewBigEndianPostings(l), nil +} + +// LabelNamesOffsetsFor decodes the offsets of the name symbols for a given series. +// They are returned in the same order they're stored, which should be sorted lexicographically. +func (dec *Decoder) LabelNamesOffsetsFor(b []byte) ([]uint32, error) { + d := encoding.DecWrap(tsdb_enc.Decbuf{B: b}) + _ = d.Be64() // skip fingerprint + k := d.Uvarint() + + offsets := make([]uint32, k) + for i := 0; i < k; i++ { + offsets[i] = uint32(d.Uvarint()) + _ = d.Uvarint() // skip the label value + + if d.Err() != nil { + return nil, errors.Wrap(d.Err(), "read series label offsets") + } + } + + return offsets, d.Err() +} + +// LabelValueFor decodes a label for a given series. +func (dec *Decoder) LabelValueFor(b []byte, label string) (string, error) { + d := encoding.DecWrap(tsdb_enc.Decbuf{B: b}) + _ = d.Be64() // skip fingerprint + k := d.Uvarint() + + for i := 0; i < k; i++ { + lno := uint32(d.Uvarint()) + lvo := uint32(d.Uvarint()) + + if d.Err() != nil { + return "", errors.Wrap(d.Err(), "read series label offsets") + } + + ln, err := dec.LookupSymbol(lno) + if err != nil { + return "", errors.Wrap(err, "lookup label name") + } + + if ln == label { + lv, err := dec.LookupSymbol(lvo) + if err != nil { + return "", errors.Wrap(err, "lookup label value") + } + + return lv, nil + } + } + + return "", d.Err() +} + +// Series decodes a series entry from the given byte slice into lset and chks. +func (dec *Decoder) Series(b []byte, lbls *phlaremodel.Labels, chks *[]index.ChunkMeta, group bool, by ...string) (uint64, error) { + if lbls != nil { + *lbls = (*lbls)[:0] + } + *chks = (*chks)[:0] + + d := encoding.DecWrap(tsdb_enc.Decbuf{B: b}) + + fprint := d.Be64() + k := d.Uvarint() + + for i := 0; i < k; i++ { + lno := uint32(d.Uvarint()) + lvo := uint32(d.Uvarint()) + + if d.Err() != nil { + return 0, errors.Wrap(d.Err(), "read series label offsets") + } + if lbls == nil { + continue + } + if group && len(by) == 0 { + // If we're grouping by all labels, we don't need to decode them. + continue + } + ln, err := dec.LookupSymbol(lno) + if err != nil { + return 0, errors.Wrap(err, "lookup label name") + } + if group { + var found bool + for _, b := range by { + if b == ln { + found = true + break + } + } + if !found { + continue + } + } + lv, err := dec.LookupSymbol(lvo) + if err != nil { + return 0, errors.Wrap(err, "lookup label value") + } + + *lbls = append(*lbls, &typesv1.LabelPair{Name: ln, Value: lv}) + } + + // Read the chunks meta data. 
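+ // The first chunk stores an absolute MinTime (varint) and MaxTime as an
+ // unsigned delta from it; later chunks store MinTime as a signed delta from
+ // the previous MaxTime (chunks may overlap) and MaxTime as a delta from
+ // their own MinTime, mirroring the encoding in AddSeries.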
+ k = d.Uvarint() + + if k == 0 { + return 0, d.Err() + } + + t0 := d.Varint64() + maxt := int64(d.Uvarint64()) + t0 + kb := uint32(d.Uvarint()) + entries := uint32(d.Uvarint64()) + checksum := d.Be32() + + *chks = append(*chks, index.ChunkMeta{ + Checksum: checksum, + MinTime: t0, + MaxTime: maxt, + KB: kb, + SeriesIndex: entries, + }) + t0 = maxt + + for i := 1; i < k; i++ { + // Decode the diff against previous chunk as varint + // instead of uvarint because chunks may overlap + mint := d.Varint64() + t0 + maxt := int64(d.Uvarint64()) + mint + kb := uint32(d.Uvarint()) + entries := uint32(d.Uvarint64()) + checksum := d.Be32() + t0 = maxt + + if d.Err() != nil { + return 0, errors.Wrapf(d.Err(), "read meta for chunk %d", i) + } + + *chks = append(*chks, index.ChunkMeta{ + Checksum: checksum, + MinTime: mint, + MaxTime: maxt, + KB: kb, + SeriesIndex: entries, + }) + } + return fprint, d.Err() +} + +func yoloString(b []byte) string { + return *((*string)(unsafe.Pointer(&b))) +} + +func (w *Writer) ReleaseIndexBuffer() *BufferWriter { + res := w.f + w.f = nil + return res +} diff --git a/pkg/experiment/ingester/loki/index/index_test.go b/pkg/experiment/ingester/loki/index/index_test.go new file mode 100644 index 0000000000..be250e1064 --- /dev/null +++ b/pkg/experiment/ingester/loki/index/index_test.go @@ -0,0 +1,597 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package index + +import ( + "context" + "fmt" + "hash/crc32" + "math/rand" + "os" + "path/filepath" + "sort" + "testing" + + "github.com/grafana/pyroscope/pkg/phlaredb/tsdb/index" + + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + "go.uber.org/goleak" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/encoding" + "github.com/prometheus/prometheus/util/testutil" + + typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" + "github.com/grafana/pyroscope/pkg/iter" + phlaremodel "github.com/grafana/pyroscope/pkg/model" +) + +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m, + goleak.IgnoreTopFunction("github.com/golang/glog.(*fileSink).flushDaemon"), + goleak.IgnoreTopFunction("github.com/dgraph-io/ristretto.(*defaultPolicy).processItems"), + goleak.IgnoreTopFunction("github.com/dgraph-io/ristretto.(*Cache).processItems"), + ) +} + +type series struct { + l phlaremodel.Labels + chunks []index.ChunkMeta +} + +type mockIndex struct { + series map[storage.SeriesRef]series + // we're forced to use a anonymous struct here because we can't use typesv1.LabelPair as it's not comparable. 
+ postings map[struct{ Name, Value string }][]storage.SeriesRef + symbols map[string]struct{} +} + +func newMockIndex() mockIndex { + allPostingsKeyName, allPostingsKeyValue := index.AllPostingsKey() + ix := mockIndex{ + series: make(map[storage.SeriesRef]series), + postings: make(map[struct{ Name, Value string }][]storage.SeriesRef), + symbols: make(map[string]struct{}), + } + ix.postings[struct { + Name string + Value string + }{allPostingsKeyName, allPostingsKeyValue}] = []storage.SeriesRef{} + return ix +} + +func (m mockIndex) Symbols() (map[string]struct{}, error) { + return m.symbols, nil +} + +func (m mockIndex) AddSeries(ref storage.SeriesRef, l phlaremodel.Labels, chunks ...index.ChunkMeta) error { + allPostingsKeyName, allPostingsKeyValue := index.AllPostingsKey() + + if _, ok := m.series[ref]; ok { + return errors.Errorf("series with reference %d already added", ref) + } + for _, lbl := range l { + m.symbols[lbl.Name] = struct{}{} + m.symbols[lbl.Value] = struct{}{} + if _, ok := m.postings[struct { + Name string + Value string + }{lbl.Name, lbl.Value}]; !ok { + m.postings[struct { + Name string + Value string + }{lbl.Name, lbl.Value}] = []storage.SeriesRef{} + } + m.postings[struct { + Name string + Value string + }{lbl.Name, lbl.Value}] = append(m.postings[struct { + Name string + Value string + }{lbl.Name, lbl.Value}], ref) + } + m.postings[struct { + Name string + Value string + }{allPostingsKeyName, allPostingsKeyValue}] = append(m.postings[struct { + Name string + Value string + }{allPostingsKeyName, allPostingsKeyValue}], ref) + + s := series{l: l} + // Actual chunk data is not stored in the index. + s.chunks = append(s.chunks, chunks...) + m.series[ref] = s + + return nil +} + +func (m mockIndex) Close() error { + return nil +} + +func (m mockIndex) LabelValues(name string) ([]string, error) { + values := []string{} + for l := range m.postings { + if l.Name == name { + values = append(values, l.Value) + } + } + return values, nil +} + +func (m mockIndex) Postings(name string, values ...string) (index.Postings, error) { + p := []index.Postings{} + for _, value := range values { + p = append(p, iter.NewSliceSeekIterator(m.postings[struct { + Name string + Value string + }{Name: name, Value: value}])) + } + return index.Merge(p...), nil +} + +func (m mockIndex) Series(ref storage.SeriesRef, lset *phlaremodel.Labels, chks *[]index.ChunkMeta) error { + s, ok := m.series[ref] + if !ok { + return errors.New("not found") + } + *lset = append((*lset)[:0], s.l...) + *chks = append((*chks)[:0], s.chunks...) + + return nil +} + +func TestIndexRW_Create_Open(t *testing.T) { + + // An empty index must still result in a readable file. + iw, err := NewWriter(context.Background(), BlocksIndexWriterBufSize) + require.NoError(t, err) + require.NoError(t, iw.Close()) + + bytes := iw.ReleaseIndexBuffer().buf.Bytes() + ir, err := NewReader(RealByteSlice(bytes)) + require.NoError(t, err) + require.NoError(t, ir.Close()) + + // Modify magic header must cause open to fail. 
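+ // The writer is in-memory now, so the test corrupts the first byte of the
+ // released buffer directly; the file-based OpenFile/NewFileReader checks
+ // below remain commented out, so the failure path is not re-verified here.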
+ //f, err := os.OpenFile(fn, os.O_WRONLY, 0o666) + //require.NoError(t, err) + //err = iw.f.WriteAt([]byte{0, 0}, 0) + bytes[0] = 0 + require.NoError(t, err) + //f.Close() + + //_, err = NewFileReader(dir) + //require.Error(t, err) +} + +func TestIndexRW_Postings(t *testing.T) { + + iw, err := NewWriter(context.Background(), BlocksIndexWriterBufSize) + require.NoError(t, err) + + series := []phlaremodel.Labels{ + phlaremodel.LabelsFromStrings("a", "1", "b", "1"), + phlaremodel.LabelsFromStrings("a", "1", "b", "2"), + phlaremodel.LabelsFromStrings("a", "1", "b", "3"), + phlaremodel.LabelsFromStrings("a", "1", "b", "4"), + } + + require.NoError(t, iw.AddSymbol("1")) + require.NoError(t, iw.AddSymbol("2")) + require.NoError(t, iw.AddSymbol("3")) + require.NoError(t, iw.AddSymbol("4")) + require.NoError(t, iw.AddSymbol("a")) + require.NoError(t, iw.AddSymbol("b")) + + // Postings lists are only written if a series with the respective + // reference was added before. + require.NoError(t, iw.AddSeries(1, series[0], model.Fingerprint(series[0].Hash()))) + require.NoError(t, iw.AddSeries(2, series[1], model.Fingerprint(series[1].Hash()))) + require.NoError(t, iw.AddSeries(3, series[2], model.Fingerprint(series[2].Hash()))) + require.NoError(t, iw.AddSeries(4, series[3], model.Fingerprint(series[3].Hash()))) + + require.NoError(t, iw.Close()) + + ir, err := NewReader(RealByteSlice(iw.ReleaseIndexBuffer().buf.Bytes())) + require.NoError(t, err) + + p, err := ir.Postings("a", nil, "1") + require.NoError(t, err) + + var l phlaremodel.Labels + var c []index.ChunkMeta + + for i := 0; p.Next(); i++ { + _, err := ir.Series(p.At(), &l, &c) + + require.NoError(t, err) + require.Equal(t, 0, len(c)) + require.Equal(t, series[i], l) + } + require.NoError(t, p.Err()) + + // The label indices are no longer used, so test them by hand here. + labelIndices := map[string][]string{} + require.NoError(t, ReadOffsetTable(ir.b, ir.toc.LabelIndicesTable, func(key []string, off uint64, _ int) error { + if len(key) != 1 { + return errors.Errorf("unexpected key length for label indices table %d", len(key)) + } + + d := encoding.NewDecbufAt(ir.b, int(off), castagnoliTable) + vals := []string{} + nc := d.Be32int() + if nc != 1 { + return errors.Errorf("unexpected number of label indices table names %d", nc) + } + for i := d.Be32(); i > 0; i-- { + v, err := ir.lookupSymbol(d.Be32()) + if err != nil { + return err + } + vals = append(vals, v) + } + labelIndices[key[0]] = vals + return d.Err() + })) + require.Equal(t, map[string][]string{ + "a": {"1"}, + "b": {"1", "2", "3", "4"}, + }, labelIndices) + + require.NoError(t, ir.Close()) +} + +func TestPostingsMany(t *testing.T) { + + iw, err := NewWriter(context.Background(), BlocksIndexWriterBufSize) + require.NoError(t, err) + + // Create a label in the index which has 999 values. 
+ symbols := map[string]struct{}{} + series := []phlaremodel.Labels{} + for i := 1; i < 1000; i++ { + v := fmt.Sprintf("%03d", i) + series = append(series, phlaremodel.LabelsFromStrings("i", v, "foo", "bar")) + symbols[v] = struct{}{} + } + symbols["i"] = struct{}{} + symbols["foo"] = struct{}{} + symbols["bar"] = struct{}{} + syms := []string{} + for s := range symbols { + syms = append(syms, s) + } + sort.Strings(syms) + for _, s := range syms { + require.NoError(t, iw.AddSymbol(s)) + } + + sort.Slice(series, func(i, j int) bool { + return series[i].Hash() < series[j].Hash() + }) + + for i, s := range series { + require.NoError(t, iw.AddSeries(storage.SeriesRef(i), s, model.Fingerprint(s.Hash()))) + } + require.NoError(t, iw.Close()) + + ir, err := NewReader(RealByteSlice(iw.ReleaseIndexBuffer().buf.Bytes())) + require.NoError(t, err) + defer func() { require.NoError(t, ir.Close()) }() + + cases := []struct { + in []string + }{ + // Simple cases, everything is present. + {in: []string{"002"}}, + {in: []string{"031", "032", "033"}}, + {in: []string{"032", "033"}}, + {in: []string{"127", "128"}}, + {in: []string{"127", "128", "129"}}, + {in: []string{"127", "129"}}, + {in: []string{"128", "129"}}, + {in: []string{"998", "999"}}, + {in: []string{"999"}}, + // Before actual values. + {in: []string{"000"}}, + {in: []string{"000", "001"}}, + {in: []string{"000", "002"}}, + // After actual values. + {in: []string{"999a"}}, + {in: []string{"999", "999a"}}, + {in: []string{"998", "999", "999a"}}, + // In the middle of actual values. + {in: []string{"126a", "127", "128"}}, + {in: []string{"127", "127a", "128"}}, + {in: []string{"127", "127a", "128", "128a", "129"}}, + {in: []string{"127", "128a", "129"}}, + {in: []string{"128", "128a", "129"}}, + {in: []string{"128", "129", "129a"}}, + {in: []string{"126a", "126b", "127", "127a", "127b", "128", "128a", "128b", "129", "129a", "129b"}}, + } + + for _, c := range cases { + it, err := ir.Postings("i", nil, c.in...) + require.NoError(t, err) + + got := []string{} + var lbls phlaremodel.Labels + var metas []index.ChunkMeta + for it.Next() { + _, err := ir.Series(it.At(), &lbls, &metas) + require.NoError(t, err) + got = append(got, lbls.Get("i")) + } + require.NoError(t, it.Err()) + exp := []string{} + for _, e := range c.in { + if _, ok := symbols[e]; ok && e != "l" { + exp = append(exp, e) + } + } + + // sort expected values by label hash instead of lexicographically by labelset + sort.Slice(exp, func(i, j int) bool { + return labels.FromStrings("i", exp[i], "foo", "bar").Hash() < labels.FromStrings("i", exp[j], "foo", "bar").Hash() + }) + + require.Equal(t, exp, got, fmt.Sprintf("input: %v", c.in)) + } +} + +func TestPersistence_index_e2e(t *testing.T) { + lbls, err := labels.ReadLabels("../../../../phlaredb/tsdb/testdata/20kseries.json", 20000) + require.NoError(t, err) + + flbls := make([]phlaremodel.Labels, len(lbls)) + for i, ls := range lbls { + flbls[i] = make(phlaremodel.Labels, 0, len(ls)) + for _, l := range ls { + flbls[i] = append(flbls[i], &typesv1.LabelPair{Name: l.Name, Value: l.Value}) + } + } + + // Sort labels as the index writer expects series in sorted order by fingerprint. + sort.Slice(flbls, func(i, j int) bool { + return flbls[i].Hash() < flbls[j].Hash() + }) + + symbols := map[string]struct{}{} + for _, lset := range lbls { + for _, l := range lset { + symbols[l.Name] = struct{}{} + symbols[l.Value] = struct{}{} + } + } + + var input index.IndexWriterSeriesSlice + + // Generate ChunkMetas for every label set. 
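+ // Each series gets between 1 and 20 synthetic chunk metas (depending on its
+ // position) so that Series() round-trips non-trivial chunk metadata.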
+ for i, lset := range flbls { + var metas []index.ChunkMeta + + for j := 0; j <= (i % 20); j++ { + metas = append(metas, index.ChunkMeta{ + MinTime: int64(j * 10000), + MaxTime: int64((j + 1) * 10000), + Checksum: rand.Uint32(), + }) + } + input = append(input, &index.IndexWriterSeries{ + Labels: lset, + Chunks: metas, + }) + } + + iw, err := NewWriter(context.Background(), BlocksIndexWriterBufSize) + require.NoError(t, err) + + syms := []string{} + for s := range symbols { + syms = append(syms, s) + } + sort.Strings(syms) + for _, s := range syms { + require.NoError(t, iw.AddSymbol(s)) + } + + // Population procedure as done by compaction. + var ( + postings = index.NewMemPostings() + values = map[string]map[string]struct{}{} + ) + + mi := newMockIndex() + + for i, s := range input { + err = iw.AddSeries(storage.SeriesRef(i), s.Labels, model.Fingerprint(s.Labels.Hash()), s.Chunks...) + require.NoError(t, err) + require.NoError(t, mi.AddSeries(storage.SeriesRef(i), s.Labels, s.Chunks...)) + + for _, l := range s.Labels { + valset, ok := values[l.Name] + if !ok { + valset = map[string]struct{}{} + values[l.Name] = valset + } + valset[l.Value] = struct{}{} + } + postings.Add(storage.SeriesRef(i), s.Labels) + } + + err = iw.Close() + require.NoError(t, err) + + ir, err := NewReader(RealByteSlice(iw.ReleaseIndexBuffer().buf.Bytes())) + require.NoError(t, err) + + for p := range mi.postings { + gotp, err := ir.Postings(p.Name, nil, p.Value) + require.NoError(t, err) + + expp, err := mi.Postings(p.Name, p.Value) + require.NoError(t, err) + + var lset, explset phlaremodel.Labels + var chks, expchks []index.ChunkMeta + + for gotp.Next() { + require.True(t, expp.Next()) + + ref := gotp.At() + + _, err := ir.Series(ref, &lset, &chks) + require.NoError(t, err) + + err = mi.Series(expp.At(), &explset, &expchks) + require.NoError(t, err) + require.Equal(t, explset, lset) + require.Equal(t, expchks, chks) + } + require.False(t, expp.Next(), "Expected no more postings for %q=%q", p.Name, p.Value) + require.NoError(t, gotp.Err()) + } + + labelPairs := map[string][]string{} + for l := range mi.postings { + labelPairs[l.Name] = append(labelPairs[l.Name], l.Value) + } + for k, v := range labelPairs { + sort.Strings(v) + + res, err := ir.SortedLabelValues(k) + require.NoError(t, err) + + require.Equal(t, len(v), len(res)) + for i := 0; i < len(v); i++ { + require.Equal(t, v[i], res[i]) + } + } + + gotSymbols := []string{} + it := ir.Symbols() + for it.Next() { + gotSymbols = append(gotSymbols, it.At()) + } + require.NoError(t, it.Err()) + expSymbols := []string{} + for s := range mi.symbols { + expSymbols = append(expSymbols, s) + } + sort.Strings(expSymbols) + require.Equal(t, expSymbols, gotSymbols) + + require.NoError(t, ir.Close()) +} + +func TestDecbufUvarintWithInvalidBuffer(t *testing.T) { + b := RealByteSlice([]byte{0x81, 0x81, 0x81, 0x81, 0x81, 0x81}) + + db := encoding.NewDecbufUvarintAt(b, 0, castagnoliTable) + require.Error(t, db.Err()) +} + +func TestReaderWithInvalidBuffer(t *testing.T) { + b := RealByteSlice([]byte{0x81, 0x81, 0x81, 0x81, 0x81, 0x81}) + + _, err := NewReader(b) + require.Error(t, err) +} + +// TestNewFileReaderErrorNoOpenFiles ensures that in case of an error no file remains open. 
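+// The index file is written with corrupted contents so that NewFileReader fails;
+// closing the temporary directory afterwards verifies that the descriptor was
+// released on the error path (the cleanup would fail on Windows otherwise).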
+func TestNewFileReaderErrorNoOpenFiles(t *testing.T) { + dir := testutil.NewTemporaryDirectory("block", t) + + idxName := filepath.Join(dir.Path(), "index") + err := os.WriteFile(idxName, []byte("corrupted contents"), 0o666) + require.NoError(t, err) + + _, err = NewFileReader(idxName) + require.Error(t, err) + + // dir.Close will fail on Win if idxName fd is not closed on error path. + dir.Close() +} + +func TestSymbols(t *testing.T) { + buf := encoding.Encbuf{} + + // Add prefix to the buffer to simulate symbols as part of larger buffer. + buf.PutUvarintStr("something") + + symbolsStart := buf.Len() + buf.PutBE32int(204) // Length of symbols table. + buf.PutBE32int(100) // Number of symbols. + for i := 0; i < 100; i++ { + // i represents index in unicode characters table. + buf.PutUvarintStr(string(rune(i))) // Symbol. + } + checksum := crc32.Checksum(buf.Get()[symbolsStart+4:], castagnoliTable) + buf.PutBE32(checksum) // Check sum at the end. + + s, err := NewSymbols(RealByteSlice(buf.Get()), FormatV2, symbolsStart) + require.NoError(t, err) + + // We store only 4 offsets to symbols. + require.Equal(t, 32, s.Size()) + + for i := 99; i >= 0; i-- { + s, err := s.Lookup(uint32(i)) + require.NoError(t, err) + require.Equal(t, string(rune(i)), s) + } + _, err = s.Lookup(100) + require.Error(t, err) + + for i := 99; i >= 0; i-- { + r, err := s.ReverseLookup(string(rune(i))) + require.NoError(t, err) + require.Equal(t, uint32(i), r) + } + _, err = s.ReverseLookup(string(rune(100))) + require.Error(t, err) + + iter := s.Iter() + i := 0 + for iter.Next() { + require.Equal(t, string(rune(i)), iter.At()) + i++ + } + require.NoError(t, iter.Err()) +} + +func TestDecoder_Postings_WrongInput(t *testing.T) { + _, _, err := (&Decoder{}).Postings([]byte("the cake is a lie")) + require.Error(t, err) +} + +func TestWriter_ShouldReturnErrorOnSeriesWithDuplicatedLabelNames(t *testing.T) { + w, err := NewWriter(context.Background(), BlocksIndexWriterBufSize) + require.NoError(t, err) + + require.NoError(t, w.AddSymbol("__name__")) + require.NoError(t, w.AddSymbol("metric_1")) + require.NoError(t, w.AddSymbol("metric_2")) + + require.NoError(t, w.AddSeries(0, phlaremodel.LabelsFromStrings("__name__", "metric_1", "__name__", "metric_2"), 0)) + + err = w.Close() + require.Error(t, err) + require.ErrorContains(t, err, "corruption detected when writing postings to index") +} diff --git a/pkg/experiment/ingester/segment.go b/pkg/experiment/ingester/segment.go new file mode 100644 index 0000000000..2043a8b664 --- /dev/null +++ b/pkg/experiment/ingester/segment.go @@ -0,0 +1,540 @@ +package ingester + +import ( + "context" + "crypto/rand" + "encoding/json" + "fmt" + "os" + "path" + "path/filepath" + "runtime/pprof" + "slices" + "strings" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/google/uuid" + "github.com/oklog/ulid" + "github.com/prometheus/common/model" + "github.com/thanos-io/objstore" + + "github.com/grafana/pyroscope/pkg/experiment/ingester/loki/index" + "github.com/grafana/pyroscope/pkg/experiment/metastore/client" + + profilev1 "github.com/grafana/pyroscope/api/gen/proto/go/google/v1" + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" + phlaremodel "github.com/grafana/pyroscope/pkg/model" + "github.com/grafana/pyroscope/pkg/phlaredb" + "github.com/grafana/pyroscope/pkg/phlaredb/block" + "github.com/grafana/pyroscope/pkg/phlaredb/symdb" + 
"github.com/grafana/pyroscope/pkg/tenant" + "github.com/grafana/pyroscope/pkg/util/math" +) + +const pathSegments = "segments" +const pathAnon = tenant.DefaultTenantID +const pathBlock = "block.bin" + +type shardKey uint32 + +type segmentsWriter struct { + segmentDuration time.Duration + phlarectx context.Context + l log.Logger + shards map[shardKey]*shard + shardsLock sync.RWMutex + cfg phlaredb.Config + bucket objstore.Bucket + metastoreClient *metastoreclient.Client + //wg sync.WaitGroup + cancel context.CancelFunc + metrics *segmentMetrics + cancelCtx context.Context +} + +type shard struct { + sw *segmentsWriter + current *segment + currentLock sync.RWMutex + wg sync.WaitGroup + l log.Logger + concatBuf []byte +} + +func (sh *shard) ingest(fn func(head segmentIngest) error) (segmentWaitFlushed, error) { + sh.currentLock.RLock() + s := sh.current + s.inFlightProfiles.Add(1) + sh.currentLock.RUnlock() + defer s.inFlightProfiles.Done() + return s, fn(s) +} + +func (sh *shard) loop(ctx context.Context) { + ticker := time.NewTicker(sh.sw.segmentDuration) + defer ticker.Stop() + for { + select { + case <-ticker.C: + sh.flushSegment(context.Background()) + case <-ctx.Done(): + sh.flushSegment(context.Background()) + return + } + } +} + +func (sh *shard) flushSegment(ctx context.Context) { + sh.currentLock.Lock() + s := sh.current + sh.current = sh.sw.newSegment(sh, s.shard, sh.l) + sh.currentLock.Unlock() + + go func() { // not blocking next ticks in case metastore/s3 latency is high + t1 := time.Now() + s.inFlightProfiles.Wait() + s.debuginfo.waitInflight = time.Since(t1) + + err := s.flush(ctx) + if err != nil { + _ = level.Error(sh.sw.l).Log("msg", "failed to flush segment", "err", err) + } + if s.debuginfo.movedHeads > 0 { + _ = level.Debug(s.l).Log("msg", + "writing segment block done", + "heads-count", len(s.heads), + "heads-moved-count", s.debuginfo.movedHeads, + "inflight-duration", s.debuginfo.waitInflight, + "flush-heads-duration", s.debuginfo.flushHeadsDuration, + "flush-block-duration", s.debuginfo.flushBlockDuration, + "store-meta-duration", s.debuginfo.storeMetaDuration, + "total-duration", time.Since(t1)) + } + }() +} + +func newSegmentWriter(phlarectx context.Context, l log.Logger, metrics *segmentMetrics, cfg phlaredb.Config, bucket objstore.Bucket, segmentDuration time.Duration, metastoreClient *metastoreclient.Client) *segmentsWriter { + ctx, cancelFunc := context.WithCancel(context.Background()) + sw := &segmentsWriter{ + metrics: metrics, + segmentDuration: segmentDuration, + phlarectx: phlarectx, + l: l, + bucket: bucket, + cfg: cfg, + shards: make(map[shardKey]*shard), + metastoreClient: metastoreClient, + cancel: cancelFunc, + cancelCtx: ctx, + } + + return sw +} + +func (sw *segmentsWriter) ingest(shard shardKey, fn func(head segmentIngest) error) (await segmentWaitFlushed, err error) { + sw.shardsLock.RLock() + s, ok := sw.shards[shard] + sw.shardsLock.RUnlock() + if ok { + return s.ingest(fn) + } + + sw.shardsLock.Lock() + s, ok = sw.shards[shard] + if ok { + sw.shardsLock.Unlock() + return s.ingest(fn) + } + + s = sw.newShard(shard) + sw.shards[shard] = s + sw.shardsLock.Unlock() + return s.ingest(fn) +} + +func (sw *segmentsWriter) Stop() error { + sw.l.Log("msg", "stopping segments writer") + sw.cancel() + sw.shardsLock.Lock() + defer sw.shardsLock.Unlock() + for _, s := range sw.shards { + s.wg.Wait() + } + sw.l.Log("msg", "segments writer stopped") + + return nil +} + +func (sw *segmentsWriter) newShard(sk shardKey) *shard { + sl := log.With(sw.l, "shard", 
fmt.Sprintf("%d", sk)) + sh := &shard{ + sw: sw, + l: sl, + concatBuf: make([]byte, 4*0x1000), + } + sh.current = sw.newSegment(sh, sk, sl) + sh.wg.Add(1) + go func() { + defer sh.wg.Done() + sh.loop(sw.cancelCtx) + }() + return sh +} +func (sw *segmentsWriter) newSegment(sh *shard, sk shardKey, sl log.Logger) *segment { + id := ulid.MustNew(ulid.Timestamp(time.Now()), rand.Reader) + dataPath := path.Join(sw.cfg.DataPath, pathSegments, fmt.Sprintf("%d", sk), pathAnon, id.String()) + s := &segment{ + l: log.With(sl, "segment-id", id.String()), + ulid: id, + heads: make(map[serviceKey]serviceHead), + sw: sw, + sh: sh, + shard: sk, + sshard: fmt.Sprintf("%d", sk), + dataPath: dataPath, + doneChan: make(chan struct{}), + } + return s +} + +func (s *segment) flush(ctx context.Context) error { + t1 := time.Now() + var heads []serviceHead + + defer func() { + s.cleanup() + close(s.doneChan) + s.sw.metrics.flushSegmentDuration.WithLabelValues(s.sshard).Observe(time.Since(t1).Seconds()) + + }() + pprof.Do(ctx, pprof.Labels("segment_op", "flush_heads"), func(ctx context.Context) { + heads = s.flushHeads(ctx) + }) + s.debuginfo.movedHeads = len(heads) + if len(heads) == 0 { + return nil + } + + blockPath, blockMeta, err := s.flushBlock(heads) + if err != nil { + return fmt.Errorf("failed to flush block %s: %w", s.ulid.String(), err) + } + err = s.sw.uploadBlock(blockPath, s) + if err != nil { + return fmt.Errorf("failed to upload block %s: %w", s.ulid.String(), err) + } + err = s.sw.storeMeta(ctx, blockMeta, s) + if err != nil { + return fmt.Errorf("failed to store meta %s: %w", s.ulid.String(), err) + } + return nil +} + +func (s *segment) flushBlock(heads []serviceHead) (string, *metastorev1.BlockMeta, error) { + t1 := time.Now() + meta := &metastorev1.BlockMeta{ + FormatVersion: 1, + Id: s.ulid.String(), + MinTime: 0, + MaxTime: 0, + Shard: uint32(s.shard), + CompactionLevel: 0, + TenantId: "", + TenantServices: make([]*metastorev1.TenantService, 0, len(heads)), + Size: 0, + } + + blockPath := path.Join(s.dataPath, pathBlock) + blockFile, err := os.OpenFile(blockPath, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0644) + if err != nil { + return "", nil, err + } + defer blockFile.Close() + + w := withWriterOffset(blockFile) + + for i, e := range heads { + svc, err := concatSegmentHead(s.sh, e, w) + if err != nil { + _ = level.Error(s.l).Log("msg", "failed to concat segment head", "err", err) + continue + } + if i == 0 { + meta.MinTime = svc.MinTime + meta.MaxTime = svc.MaxTime + } else { + meta.MinTime = math.Min(meta.MinTime, svc.MinTime) + meta.MaxTime = math.Max(meta.MaxTime, svc.MaxTime) + } + s.sw.metrics.headSizeBytes.WithLabelValues(s.sshard, e.key.tenant).Observe(float64(svc.Size)) + meta.TenantServices = append(meta.TenantServices, svc) + } + + meta.Size = uint64(w.offset) + s.debuginfo.flushBlockDuration = time.Since(t1) + return blockPath, meta, nil +} + +func concatSegmentHead(sh *shard, e serviceHead, w *writerOffset) (*metastorev1.TenantService, error) { + tenantServiceOffset := w.offset + b := e.head.Meta() + ptypes := e.head.MustProfileTypeNames() + + profiles, x, symbols := getFilesForSegment(e.head, b) + defer index.PutBufferWriterToPool(x) + + offsets := make([]uint64, 3) + var err error + offsets[0], err = concatFile(w, e.head, profiles, sh.concatBuf) + if err != nil { + return nil, err + } + offsets[1] = uint64(w.offset) + indexBytes, _, _ := x.Buffer() + _, err = w.Write(indexBytes) + if err != nil { + return nil, err + } + offsets[2], err = concatFile(w, e.head, symbols, sh.concatBuf) 
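+ // offsets[0..2] record where profiles.parquet, the in-memory TSDB index and
+ // the symdb symbols start within the concatenated block; they are exposed
+ // below as the TenantService table of contents.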
+ if err != nil { + return nil, err + } + + tenantServiceSize := w.offset - tenantServiceOffset + + svc := &metastorev1.TenantService{ + TenantId: e.key.tenant, + Name: e.key.service, + MinTime: int64(b.MinTime), + MaxTime: int64(b.MaxTime), + Size: uint64(tenantServiceSize), + // - 0: profiles.parquet + // - 1: index.tsdb + // - 2: symbols.symdb + TableOfContents: offsets, + ProfileTypes: ptypes, + } + return svc, nil +} + +func (s *segment) flushHeads(ctx context.Context) (moved []serviceHead) { + t1 := time.Now() + defer func() { + s.sw.metrics.flushHeadsDuration.WithLabelValues(s.sshard).Observe(time.Since(t1).Seconds()) + s.debuginfo.flushHeadsDuration = time.Since(t1) + }() + wg := sync.WaitGroup{} + mutex := new(sync.Mutex) + for _, e := range s.heads { + wg.Add(1) + e := e + go func() { + defer wg.Done() + eMoved, err := s.flushHead(ctx, e) + if err != nil { + level.Error(s.l).Log("msg", "failed to flush head", "err", err) + } + if eMoved { + mutex.Lock() + moved = append(moved, e) + mutex.Unlock() + } + }() + } + wg.Wait() + + slices.SortFunc(moved, func(i, j serviceHead) int { + c := strings.Compare(i.key.tenant, j.key.tenant) + if c != 0 { + return c + } + return strings.Compare(i.key.service, j.key.service) + }) + return moved +} + +func (s *segment) flushHead(ctx context.Context, e serviceHead) (moved bool, err error) { + th := time.Now() + if err := e.head.Flush(ctx); err != nil { + s.sw.metrics.flushServiceHeadDuration.WithLabelValues(s.sshard, e.key.tenant).Observe(time.Since(th).Seconds()) + s.sw.metrics.flushServiceHeadError.WithLabelValues(s.sshard, e.key.tenant).Inc() + return false, fmt.Errorf("failed to flush head %v: %w", e.head.BlockID(), err) + } + s.sw.metrics.flushServiceHeadDuration.WithLabelValues(s.sshard, e.key.tenant).Observe(time.Since(th).Seconds()) + stats, _ := json.Marshal(e.head.GetMetaStats()) + level.Debug(s.l).Log( + "msg", "flushed head", + "head", e.head.BlockID(), + "stats", stats, + "head-flush-duration", time.Since(th).String(), + ) + if err := e.head.Move(); err != nil { + if e.head.GetMetaStats().NumSamples == 0 { + _ = level.Debug(s.l).Log("msg", "skipping empty head", "head", e.head.BlockID()) + return false, nil + } + s.sw.metrics.flushServiceHeadError.WithLabelValues(s.sshard, e.key.tenant).Inc() + return false, fmt.Errorf("failed to move head %v: %w", e.head.BlockID(), err) + } + profiles, index, symbols := getFilesForSegment(e.head, e.head.Meta()) + if profiles == nil || index == nil || symbols == nil { + s.sw.metrics.flushServiceHeadError.WithLabelValues(s.sshard, e.key.tenant).Inc() + return false, fmt.Errorf("failed to find files %v %v %v", profiles, index, symbols) + } + return true, nil +} + +type serviceKey struct { + tenant string + service string +} +type serviceHead struct { + key serviceKey + head *phlaredb.Head +} + +type segment struct { + ulid ulid.ULID + shard shardKey + sshard string + inFlightProfiles sync.WaitGroup + heads map[serviceKey]serviceHead + headsLock sync.RWMutex + sw *segmentsWriter + dataPath string + doneChan chan struct{} + l log.Logger + + debuginfo struct { + movedHeads int + waitInflight time.Duration + flushHeadsDuration time.Duration + flushBlockDuration time.Duration + storeMetaDuration time.Duration + } + sh *shard +} + +type segmentIngest interface { + ingest(ctx context.Context, tenantID string, p *profilev1.Profile, id uuid.UUID, labels ...*typesv1.LabelPair) error +} + +type segmentWaitFlushed interface { + waitFlushed(ctx context.Context) error +} + +func (s *segment) waitFlushed(ctx 
context.Context) error { + select { + case <-ctx.Done(): + return fmt.Errorf("waitFlushed: %s %w", s.ulid.String(), ctx.Err()) + case <-s.doneChan: + return nil + } +} + +func (s *segment) ingest(ctx context.Context, tenantID string, p *profilev1.Profile, id uuid.UUID, labels ...*typesv1.LabelPair) error { + var err error + k := serviceKey{ + tenant: tenantID, + service: phlaremodel.Labels(labels).Get(phlaremodel.LabelNameServiceName), + } + s.sw.metrics.segmentIngestBytes.WithLabelValues(s.sshard, tenantID).Observe(float64(p.SizeVT())) + h, err := s.headForIngest(k) + if err != nil { + return err + } + return h.Ingest(ctx, p, id, labels...) +} + +func (s *segment) headForIngest(k serviceKey) (*phlaredb.Head, error) { + var err error + + s.headsLock.RLock() + h, ok := s.heads[k] + s.headsLock.RUnlock() + if ok { + return h.head, nil + } + + s.headsLock.Lock() + defer s.headsLock.Unlock() + h, ok = s.heads[k] + if ok { + return h.head, nil + } + + cfg := s.sw.cfg + cfg.DataPath = path.Join(s.dataPath) + cfg.SymDBFormat = symdb.FormatV3 + + nh, err := phlaredb.NewHead(s.sw.phlarectx, cfg, noopLimiter{}) + if err != nil { + return nil, err + } + + s.heads[k] = serviceHead{ + key: k, + head: nh, + } + + return nh, nil +} + +func (s *segment) cleanup() { + if err := os.RemoveAll(s.dataPath); err != nil { + _ = level.Error(s.l).Log("msg", "failed to cleanup segment", "err", err, "f", s.dataPath) + } +} + +func (sw *segmentsWriter) uploadBlock(blockPath string, s *segment) error { + t1 := time.Now() + + dst, err := filepath.Rel(sw.cfg.DataPath, blockPath) + if err != nil { + return err + } + if err := objstore.UploadFile(sw.phlarectx, sw.l, sw.bucket, blockPath, dst); err != nil { + return err + } + st, _ := os.Stat(blockPath) + if st != nil { + sw.metrics.segmentBlockSizeBytes.WithLabelValues(s.sshard).Observe(float64(st.Size())) + } + sw.metrics.blockUploadDuration.WithLabelValues(s.sshard).Observe(time.Since(t1).Seconds()) + sw.l.Log("msg", "uploaded block", "path", dst, "time-took", time.Since(t1)) + + return nil +} + +func (sw *segmentsWriter) storeMeta(ctx context.Context, meta *metastorev1.BlockMeta, s *segment) error { + t1 := time.Now() + + _, err := sw.metastoreClient.AddBlock(ctx, &metastorev1.AddBlockRequest{ + Block: meta, + }) + if err != nil { + sw.metrics.storeMetaErrors.WithLabelValues(s.sshard).Inc() + return err + } + sw.metrics.storeMetaDuration.WithLabelValues(s.sshard).Observe(time.Since(t1).Seconds()) + s.debuginfo.storeMetaDuration = time.Since(t1) + return nil +} + +func getFilesForSegment(_ *phlaredb.Head, b *block.Meta) (profiles *block.File, index *index.BufferWriter, symbols *block.File) { + profiles = b.FileByRelPath("profiles.parquet") + // FIXME + // index = head.TSDBIndex() + symbols = b.FileByRelPath("symbols.symdb") + return +} + +type noopLimiter struct{} + +func (noopLimiter) AllowProfile(model.Fingerprint, phlaremodel.Labels, int64) error { return nil } + +func (noopLimiter) Stop() {} diff --git a/pkg/experiment/ingester/segment_metrics.go b/pkg/experiment/ingester/segment_metrics.go new file mode 100644 index 0000000000..70ab1f5bfd --- /dev/null +++ b/pkg/experiment/ingester/segment_metrics.go @@ -0,0 +1,117 @@ +package ingester + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +type segmentMetrics struct { + segmentIngestBytes *prometheus.HistogramVec + segmentBlockSizeBytes *prometheus.HistogramVec + headSizeBytes *prometheus.HistogramVec + storeMetaDuration *prometheus.HistogramVec + segmentFlushWaitDuration *prometheus.HistogramVec + 
segmentFlushTimeouts *prometheus.CounterVec + storeMetaErrors *prometheus.CounterVec + blockUploadDuration *prometheus.HistogramVec + flushSegmentDuration *prometheus.HistogramVec + flushHeadsDuration *prometheus.HistogramVec + flushServiceHeadDuration *prometheus.HistogramVec + flushServiceHeadError *prometheus.CounterVec +} + +var ( + networkTimingBuckets = prometheus.ExponentialBucketsRange(0.005, 4, 20) + dataTimingBuckets = prometheus.ExponentialBucketsRange(0.001, 1, 20) + segmentFlushWaitBuckets = []float64{.1, .2, .3, .4, .5, .6, .7, .8, .9, 1, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2} +) + +func newSegmentMetrics(reg prometheus.Registerer) *segmentMetrics { + + m := &segmentMetrics{ + segmentIngestBytes: prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "pyroscope", + Name: "segment_ingest_bytes", + Buckets: prometheus.ExponentialBucketsRange(10*1024, 15*1024*1024, 20), + }, + []string{"shard", "tenant"}), + segmentBlockSizeBytes: prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "pyroscope", + Name: "segment_block_size_bytes", + Buckets: prometheus.ExponentialBucketsRange(100*1024, 100*1024*1024, 20), + }, + []string{"shard"}), + storeMetaDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "pyroscope", + Name: "segment_store_meta_duration_seconds", + Buckets: networkTimingBuckets, + }, []string{"shard"}), + blockUploadDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "pyroscope", + Name: "segment_block_upload_duration_seconds", + Buckets: networkTimingBuckets, + }, []string{"shard"}), + + storeMetaErrors: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "pyroscope", + Name: "segment_store_meta_errors", + }, []string{"shard"}), + + segmentFlushWaitDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "pyroscope", + Name: "segment_ingester_wait_duration_seconds", + Buckets: segmentFlushWaitBuckets, + }, []string{"tenant"}), + segmentFlushTimeouts: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "pyroscope", + Name: "segment_ingester_wait_timeouts", + }, []string{"tenant"}), + flushHeadsDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "pyroscope", + Name: "segment_flush_heads_duration_seconds", + Buckets: dataTimingBuckets, + }, []string{"shard"}), + flushServiceHeadDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "pyroscope", + Name: "segment_flush_service_head_duration_seconds", + Buckets: dataTimingBuckets, + }, []string{"shard", "tenant"}), + flushSegmentDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "pyroscope", + Name: "segment_flush_segment_duration_seconds", + Buckets: networkTimingBuckets, + }, []string{"shard"}), + + flushServiceHeadError: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "pyroscope", + Name: "segment_flush_service_head_errors", + }, []string{"shard", "tenant"}), + headSizeBytes: prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "pyroscope", + Name: "segment_head_size_bytes", + Buckets: prometheus.ExponentialBucketsRange(10*1024, 100*1024*1024, 30), + }, []string{"shard", "tenant"}), + } + + if reg != nil { + reg.MustRegister(m.segmentIngestBytes) + reg.MustRegister(m.segmentBlockSizeBytes) + reg.MustRegister(m.storeMetaDuration) + reg.MustRegister(m.segmentFlushWaitDuration) + reg.MustRegister(m.segmentFlushTimeouts) + reg.MustRegister(m.storeMetaErrors) + 
reg.MustRegister(m.blockUploadDuration) + reg.MustRegister(m.flushHeadsDuration) + reg.MustRegister(m.flushServiceHeadDuration) + reg.MustRegister(m.flushServiceHeadError) + reg.MustRegister(m.flushSegmentDuration) + //reg.MustRegister(m.flushSegmentsDuration) + reg.MustRegister(m.headSizeBytes) + } + return m +} diff --git a/pkg/experiment/ingester/writer_offset.go b/pkg/experiment/ingester/writer_offset.go new file mode 100644 index 0000000000..b97f34e52c --- /dev/null +++ b/pkg/experiment/ingester/writer_offset.go @@ -0,0 +1,48 @@ +package ingester + +import ( + "io" + "os" + + "github.com/grafana/pyroscope/pkg/phlaredb" + "github.com/grafana/pyroscope/pkg/phlaredb/block" +) + +type writerOffset struct { + io.Writer + offset int64 + //err error +} + +func withWriterOffset(w io.Writer) *writerOffset { + return &writerOffset{Writer: w} +} + +//func (w *writerOffset) write(p []byte) { +// if w.err == nil { +// n, err := w.Writer.Write(p) +// w.offset += int64(n) +// w.err = err +// } +//} + +func (w *writerOffset) Write(p []byte) (n int, err error) { + n, err = w.Writer.Write(p) + w.offset += int64(n) + return n, err +} + +func concatFile(w *writerOffset, h *phlaredb.Head, f *block.File, buf []byte) (uint64, error) { + o := w.offset + fp := h.LocalPathFor(f.RelPath) + file, err := os.Open(fp) + if err != nil { + return 0, err + } + defer file.Close() + _, err = io.CopyBuffer(w, file, buf) + if err != nil { + return 0, err + } + return uint64(o), nil +} diff --git a/pkg/experiment/metastore/client/client.go b/pkg/experiment/metastore/client/client.go new file mode 100644 index 0000000000..7a6a14d16e --- /dev/null +++ b/pkg/experiment/metastore/client/client.go @@ -0,0 +1,103 @@ +package metastoreclient + +import ( + "context" + "flag" + "fmt" + "os" + + "github.com/go-kit/log" + + "github.com/grafana/dskit/grpcclient" + "github.com/grafana/dskit/services" + "github.com/opentracing-contrib/go-grpc" + "github.com/opentracing/opentracing-go" + "google.golang.org/grpc" + + compactorv1 "github.com/grafana/pyroscope/api/gen/proto/go/compactor/v1" + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" +) + +type Config struct { + MetastoreAddress string `yaml:"address"` + GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config" doc:"description=Configures the gRPC client used to communicate between the query-frontends and the query-schedulers."` +} + +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + f.StringVar(&cfg.MetastoreAddress, "metastore.address", "localhost:9095", "") + cfg.GRPCClientConfig.RegisterFlagsWithPrefix("metastore.grpc-client-config", f) +} + +func (cfg *Config) Validate() error { + if cfg.MetastoreAddress == "" { + return fmt.Errorf("metastore.address is required") + } + return cfg.GRPCClientConfig.Validate() +} + +type Client struct { + metastorev1.MetastoreServiceClient + compactorv1.CompactionPlannerClient + service services.Service + conn *grpc.ClientConn + config Config +} + +func New(config Config, logger log.Logger) (c *Client, err error) { + c = &Client{config: config} + c.conn, err = dial(c.config, logger) + if err != nil { + return nil, err + } + c.MetastoreServiceClient = metastorev1.NewMetastoreServiceClient(c.conn) + c.CompactionPlannerClient = compactorv1.NewCompactionPlannerClient(c.conn) + c.service = services.NewIdleService(c.starting, c.stopping) + return c, nil +} + +func (c *Client) Service() services.Service { return c.service } +func (c *Client) starting(context.Context) error { return nil } +func (c *Client) stopping(error) 
error { return c.conn.Close() } + +func dial(cfg Config, logger log.Logger) (*grpc.ClientConn, error) { + if err := cfg.Validate(); err != nil { + return nil, err + } + options, err := cfg.GRPCClientConfig.DialOption(nil, nil) + if err != nil { + return nil, err + } + // TODO: https://github.com/grpc/grpc-proto/blob/master/grpc/service_config/service_config.proto + options = append(options, + grpc.WithDefaultServiceConfig(grpcServiceConfig), + grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer())), + ) + if os.Getenv("KUBERNETES_SERVICE_HOST") != "" { + builder, err := NewGrpcResolverBuilder(logger, cfg.MetastoreAddress) + if err != nil { + return nil, fmt.Errorf("failed to create grpc resolver builder: %w", err) + } + options = append(options, grpc.WithResolvers(builder)) + return grpc.Dial(builder.resolverAddrStub(), options...) + } else { + return grpc.Dial(cfg.MetastoreAddress, options...) + } +} + +const grpcServiceConfig = `{ + "healthCheckConfig": { + "serviceName": "metastore.v1.MetastoreService.RaftLeader" + }, + "loadBalancingPolicy":"round_robin", + "methodConfig": [{ + "name": [{"service": "metastore.v1.MetastoreService"}], + "waitForReady": true, + "retryPolicy": { + "MaxAttempts": 16, + "InitialBackoff": ".01s", + "MaxBackoff": ".01s", + "BackoffMultiplier": 1.0, + "RetryableStatusCodes": [ "UNAVAILABLE" ] + } + }] +}` diff --git a/pkg/experiment/metastore/client/grpc_endpointslice_resolver.go b/pkg/experiment/metastore/client/grpc_endpointslice_resolver.go new file mode 100644 index 0000000000..b029f03555 --- /dev/null +++ b/pkg/experiment/metastore/client/grpc_endpointslice_resolver.go @@ -0,0 +1,202 @@ +package metastoreclient + +import ( + "context" + "fmt" + "regexp" + "strings" + + "github.com/go-kit/log" + "github.com/prometheus/prometheus/discovery" + promk8s "github.com/prometheus/prometheus/discovery/kubernetes" + "github.com/prometheus/prometheus/discovery/targetgroup" + "google.golang.org/grpc/resolver" +) + +const GrpcEndpointSLiceResovlerSchema = "metastore-endpointslice" + +type EndpointSliceResolverBuilder struct { + l log.Logger + name string + namespace string + port string +} + +func NewGrpcResolverBuilder(l log.Logger, address string) (*EndpointSliceResolverBuilder, error) { + g := &EndpointSliceResolverBuilder{l: log.With(l, "component", "metastore-grpc-resolver-builder")} + name, namespace, port, err := getEndpointSliceTargetFromDnsTarget(address) + if err != nil { + return nil, fmt.Errorf("failed to parse target: %w", err) + } + g.name = name + g.namespace = namespace + g.port = port + g.l.Log("msg", "created new grpc resolver builder", "name", name, "namespace", namespace, "port", port) + + return g, nil +} + +func (g *EndpointSliceResolverBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + g.l.Log("msg", "building resolver", "target", target, "opts", fmt.Sprintf("%+v", opts)) + rr := &GrpcResolver{w: nil, l: log.With(g.l, "component", "metastore-grpc-resolver", "target", target)} + newWatcher, err := NewEndpointSliceWatcher(g.l, g.name, g.namespace, func(ips []string) { + addresses := make([]resolver.Address, 0, len(ips)) + for _, ip := range ips { + addresses = append(addresses, resolver.Address{Addr: ip + ":" + g.port}) + } + err := cc.UpdateState(resolver.State{Addresses: addresses}) + if err != nil { + rr.l.Log("msg", "failed to update state", "err", err) + } else { + rr.l.Log("msg", "updated state", "addresses", fmt.Sprintf("%+v", addresses)) 
+ } + }) + if err != nil { + return nil, fmt.Errorf("failed to create watcher: %w", err) + } + rr.w = newWatcher + return rr, nil +} + +func (g *EndpointSliceResolverBuilder) Scheme() string { + return GrpcEndpointSLiceResovlerSchema +} + +func (g *EndpointSliceResolverBuilder) resolverAddrStub() string { + return fmt.Sprintf("%s://stub:239", GrpcEndpointSLiceResovlerSchema) +} + +type GrpcResolver struct { + w *EndpointSliceWatcher + l log.Logger +} + +func (g *GrpcResolver) ResolveNow(o resolver.ResolveNowOptions) { + //g.l.Log("msg", "resolve now", "opts", o) +} + +func (g *GrpcResolver) Close() { + g.l.Log("msg", "close") + g.w.Close() +} + +func getEndpointSliceTargetFromDnsTarget(src string) (string, string, string, error) { + + re := regexp.MustCompile("dns:///_grpc._tcp\\.([\\S^.]+)\\.([\\S^.]+)(\\.svc\\.cluster\\.local\\.):([0-9]+)") + all := re.FindSubmatch([]byte(src)) + if len(all) == 0 { + return "", "", "", fmt.Errorf("failed to parse target") + } + name := string(all[1]) + namespace := string(all[2]) + port := string(all[4]) + return name, namespace, port, nil +} + +type EndpointSliceWatcher struct { + l log.Logger + d discovery.Discoverer + ctx context.Context + cancel context.CancelFunc + name string + namespace string + cb func(ips []string) +} + +func (w *EndpointSliceWatcher) watch(up chan []*targetgroup.Group) { + isNeededSlice := func(group *targetgroup.Group) bool { //todo proper selection + //if strings.Contains(group.Source, "endpointslice/"+"pyroscope"+"/"+"pyroscope-micro-services-metastore-headless") { + // return true + //} + //if strings.Contains(group.Source, "endpointslice/"+"profiles-dev-003"+"/"+"pyroscope-metastore-headless") { + // return true + //} + substr := "endpointslice/" + w.namespace + "/" + w.name + w.l.Log("msg", "checking group", "source", group.Source, "substr", substr) + if strings.Contains(group.Source, substr) { + return true + } + return false + } + w.l.Log("msg", "starting watch") + for { + select { + case <-w.ctx.Done(): + w.l.Log("msg", "context done, stopping watch") + return + case groups := <-up: + ipset := make(map[string]string) + for _, group := range groups { + if !isNeededSlice(group) { + w.l.Log("msg", "skipping group", "source", group.Source) + continue + } + w.l.Log("msg", "processing group", "source", group.Source) + for _, target := range group.Targets { + ip := target["__meta_kubernetes_pod_ip"] + ready := target["__meta_kubernetes_pod_ready"] + phase := target["__meta_kubernetes_pod_phase"] + podname := target["__meta_kubernetes_pod_name"] + w.l.Log("msg", "received new target", "tt", fmt.Sprintf(">>%s %s %s<<", ip, phase, ready)) + ipset[string(ip)] = string(podname) + } + } + if len(ipset) == 0 { + continue + } + if w.cb != nil { + ipss := make([]string, 0, len(ipset)) + for k := range ipset { + ipss = append(ipss, k) + } + w.cb(ipss) + } + w.l.Log("msg", "received new target groups", "ips", fmt.Sprintf("%+v", ipset)) + } + } +} + +func (w *EndpointSliceWatcher) Close() error { + w.cancel() + return nil +} + +func NewEndpointSliceWatcher(l log.Logger, name, namespace string, cb func(ips []string)) (*EndpointSliceWatcher, error) { + l = log.With(l, "component", "metastore-watcher") + sdc := &promk8s.SDConfig{ + Role: promk8s.RoleEndpointSlice, + Selectors: []promk8s.SelectorConfig{ + { + Role: promk8s.RoleEndpointSlice, + Label: "app.kubernetes.io/component=metastore", + }, + }, + } + refreshMetrics := discovery.NewRefreshMetrics(nil) + m := sdc.NewDiscovererMetrics(nil, refreshMetrics) + d, err := 
sdc.NewDiscoverer(discovery.DiscovererOptions{ + Logger: log.With(l, "component", "metastore-watcher-discovery"), + Metrics: m, + HTTPClientOptions: nil, + }) + if err != nil { + l.Log("msg", "failed to create discoverer", "err", err) + return nil, err + } + ctx, cacnel := context.WithCancel(context.Background()) + up := make(chan []*targetgroup.Group) + + l.Log("msg", "starting watcher") + w := &EndpointSliceWatcher{ + d: d, + cancel: cacnel, + ctx: ctx, + l: l, + cb: cb, + name: name, + namespace: namespace, + } + go d.Run(ctx, up) + go w.watch(up) + return w, nil +} diff --git a/pkg/experiment/metastore/compactionpb/compaction.pb.go b/pkg/experiment/metastore/compactionpb/compaction.pb.go new file mode 100644 index 0000000000..0cd0129d38 --- /dev/null +++ b/pkg/experiment/metastore/compactionpb/compaction.pb.go @@ -0,0 +1,403 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc (unknown) +// source: experiment/metastore/compactionpb/compaction.proto + +package compactionpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CompactionStatus int32 + +const ( + CompactionStatus_COMPACTION_STATUS_UNSPECIFIED CompactionStatus = 0 + CompactionStatus_COMPACTION_STATUS_IN_PROGRESS CompactionStatus = 1 + CompactionStatus_COMPACTION_STATUS_SUCCESS CompactionStatus = 2 + CompactionStatus_COMPACTION_STATUS_FAILURE CompactionStatus = 3 +) + +// Enum value maps for CompactionStatus. +var ( + CompactionStatus_name = map[int32]string{ + 0: "COMPACTION_STATUS_UNSPECIFIED", + 1: "COMPACTION_STATUS_IN_PROGRESS", + 2: "COMPACTION_STATUS_SUCCESS", + 3: "COMPACTION_STATUS_FAILURE", + } + CompactionStatus_value = map[string]int32{ + "COMPACTION_STATUS_UNSPECIFIED": 0, + "COMPACTION_STATUS_IN_PROGRESS": 1, + "COMPACTION_STATUS_SUCCESS": 2, + "COMPACTION_STATUS_FAILURE": 3, + } +) + +func (x CompactionStatus) Enum() *CompactionStatus { + p := new(CompactionStatus) + *p = x + return p +} + +func (x CompactionStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CompactionStatus) Descriptor() protoreflect.EnumDescriptor { + return file_experiment_metastore_compactionpb_compaction_proto_enumTypes[0].Descriptor() +} + +func (CompactionStatus) Type() protoreflect.EnumType { + return &file_experiment_metastore_compactionpb_compaction_proto_enumTypes[0] +} + +func (x CompactionStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CompactionStatus.Descriptor instead. +func (CompactionStatus) EnumDescriptor() ([]byte, []int) { + return file_experiment_metastore_compactionpb_compaction_proto_rawDescGZIP(), []int{0} +} + +type CompactionJob struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unique name of the job. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // List of the input blocks. 
+ Blocks []string `protobuf:"bytes,2,rep,name=blocks,proto3" json:"blocks,omitempty"` + // Compaction level (all blocks are the same) + CompactionLevel uint32 `protobuf:"varint,3,opt,name=compaction_level,json=compactionLevel,proto3" json:"compaction_level,omitempty"` + // The index of the raft command that changed the status of the job. + // Used as a fencing token in conjunction with the lease_expires_at + // field to manage ownership of the compaction job. Any access to the + // job must be guarded by the check: current_index >= raft_log_index. + // If the check fails, the access should be denied. + // + // The index is updated every time the job is assigned to a worker. + RaftLogIndex uint64 `protobuf:"varint,4,opt,name=raft_log_index,json=raftLogIndex,proto3" json:"raft_log_index,omitempty"` + // Shard the blocks belong to. + Shard uint32 `protobuf:"varint,5,opt,name=shard,proto3" json:"shard,omitempty"` + // Optional, empty for compaction level 0. + TenantId string `protobuf:"bytes,6,opt,name=tenant_id,json=tenantId,proto3" json:"tenant_id,omitempty"` + Status CompactionStatus `protobuf:"varint,7,opt,name=status,proto3,enum=compaction.CompactionStatus" json:"status,omitempty"` + // The time the compaction job lease expires. If a lease is expired, the + // job is considered abandoned and can be picked up by another worker. + // The expiration check should be done by comparing the timestamp of + // the raft log entry (command that accesses the job) with the value of + // this field. + // + // The lease is extended every time the owner reports a status update. + LeaseExpiresAt int64 `protobuf:"varint,8,opt,name=lease_expires_at,json=leaseExpiresAt,proto3" json:"lease_expires_at,omitempty"` +} + +func (x *CompactionJob) Reset() { + *x = CompactionJob{} + if protoimpl.UnsafeEnabled { + mi := &file_experiment_metastore_compactionpb_compaction_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CompactionJob) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CompactionJob) ProtoMessage() {} + +func (x *CompactionJob) ProtoReflect() protoreflect.Message { + mi := &file_experiment_metastore_compactionpb_compaction_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CompactionJob.ProtoReflect.Descriptor instead. 
+func (*CompactionJob) Descriptor() ([]byte, []int) { + return file_experiment_metastore_compactionpb_compaction_proto_rawDescGZIP(), []int{0} +} + +func (x *CompactionJob) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CompactionJob) GetBlocks() []string { + if x != nil { + return x.Blocks + } + return nil +} + +func (x *CompactionJob) GetCompactionLevel() uint32 { + if x != nil { + return x.CompactionLevel + } + return 0 +} + +func (x *CompactionJob) GetRaftLogIndex() uint64 { + if x != nil { + return x.RaftLogIndex + } + return 0 +} + +func (x *CompactionJob) GetShard() uint32 { + if x != nil { + return x.Shard + } + return 0 +} + +func (x *CompactionJob) GetTenantId() string { + if x != nil { + return x.TenantId + } + return "" +} + +func (x *CompactionJob) GetStatus() CompactionStatus { + if x != nil { + return x.Status + } + return CompactionStatus_COMPACTION_STATUS_UNSPECIFIED +} + +func (x *CompactionJob) GetLeaseExpiresAt() int64 { + if x != nil { + return x.LeaseExpiresAt + } + return 0 +} + +type JobPreQueue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CompactionLevel uint32 `protobuf:"varint,1,opt,name=compaction_level,json=compactionLevel,proto3" json:"compaction_level,omitempty"` + Shard uint32 `protobuf:"varint,2,opt,name=shard,proto3" json:"shard,omitempty"` + Tenant string `protobuf:"bytes,3,opt,name=tenant,proto3" json:"tenant,omitempty"` + Blocks []string `protobuf:"bytes,4,rep,name=blocks,proto3" json:"blocks,omitempty"` +} + +func (x *JobPreQueue) Reset() { + *x = JobPreQueue{} + if protoimpl.UnsafeEnabled { + mi := &file_experiment_metastore_compactionpb_compaction_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JobPreQueue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JobPreQueue) ProtoMessage() {} + +func (x *JobPreQueue) ProtoReflect() protoreflect.Message { + mi := &file_experiment_metastore_compactionpb_compaction_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JobPreQueue.ProtoReflect.Descriptor instead. 
+func (*JobPreQueue) Descriptor() ([]byte, []int) { + return file_experiment_metastore_compactionpb_compaction_proto_rawDescGZIP(), []int{1} +} + +func (x *JobPreQueue) GetCompactionLevel() uint32 { + if x != nil { + return x.CompactionLevel + } + return 0 +} + +func (x *JobPreQueue) GetShard() uint32 { + if x != nil { + return x.Shard + } + return 0 +} + +func (x *JobPreQueue) GetTenant() string { + if x != nil { + return x.Tenant + } + return "" +} + +func (x *JobPreQueue) GetBlocks() []string { + if x != nil { + return x.Blocks + } + return nil +} + +var File_experiment_metastore_compactionpb_compaction_proto protoreflect.FileDescriptor + +var file_experiment_metastore_compactionpb_compaction_proto_rawDesc = []byte{ + 0x0a, 0x32, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, + 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x70, 0x62, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x9f, 0x02, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, + 0x6f, 0x62, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x12, 0x29, + 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, + 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x24, 0x0a, 0x0e, 0x72, 0x61, 0x66, + 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x0c, 0x72, 0x61, 0x66, 0x74, 0x4c, 0x6f, 0x67, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, + 0x49, 0x64, 0x12, 0x34, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x73, + 0x65, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, + 0x41, 0x74, 0x22, 0x7e, 0x0a, 0x0b, 0x4a, 0x6f, 0x62, 0x50, 0x72, 0x65, 0x51, 0x75, 0x65, 0x75, + 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x63, 0x6f, 0x6d, + 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x12, 0x16, 0x0a, 
0x06, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x73, 0x2a, 0x96, 0x01, 0x0a, 0x10, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x4f, 0x4d, 0x50, 0x41, + 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, + 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x4f, + 0x4d, 0x50, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, + 0x49, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x1d, 0x0a, + 0x19, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, + 0x55, 0x53, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x19, + 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, + 0x53, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x03, 0x42, 0xad, 0x01, 0x0a, 0x0e, + 0x63, 0x6f, 0x6d, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0f, + 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, + 0x61, 0x66, 0x61, 0x6e, 0x61, 0x2f, 0x70, 0x79, 0x72, 0x6f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x2f, + 0x70, 0x6b, 0x67, 0x2f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x6d, + 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x43, 0x58, 0x58, 0xaa, 0x02, 0x0a, 0x43, 0x6f, + 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0xca, 0x02, 0x0a, 0x43, 0x6f, 0x6d, 0x70, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0xe2, 0x02, 0x16, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, + 0x0a, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_experiment_metastore_compactionpb_compaction_proto_rawDescOnce sync.Once + file_experiment_metastore_compactionpb_compaction_proto_rawDescData = file_experiment_metastore_compactionpb_compaction_proto_rawDesc +) + +func file_experiment_metastore_compactionpb_compaction_proto_rawDescGZIP() []byte { + file_experiment_metastore_compactionpb_compaction_proto_rawDescOnce.Do(func() { + file_experiment_metastore_compactionpb_compaction_proto_rawDescData = protoimpl.X.CompressGZIP(file_experiment_metastore_compactionpb_compaction_proto_rawDescData) + }) + return file_experiment_metastore_compactionpb_compaction_proto_rawDescData +} + +var file_experiment_metastore_compactionpb_compaction_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_experiment_metastore_compactionpb_compaction_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_experiment_metastore_compactionpb_compaction_proto_goTypes = []any{ + (CompactionStatus)(0), // 0: compaction.CompactionStatus + (*CompactionJob)(nil), // 1: compaction.CompactionJob + (*JobPreQueue)(nil), // 2: compaction.JobPreQueue +} +var file_experiment_metastore_compactionpb_compaction_proto_depIdxs = []int32{ + 0, // 0: compaction.CompactionJob.status:type_name -> compaction.CompactionStatus + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for 
method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_experiment_metastore_compactionpb_compaction_proto_init() } +func file_experiment_metastore_compactionpb_compaction_proto_init() { + if File_experiment_metastore_compactionpb_compaction_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_experiment_metastore_compactionpb_compaction_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*CompactionJob); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_experiment_metastore_compactionpb_compaction_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*JobPreQueue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_experiment_metastore_compactionpb_compaction_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_experiment_metastore_compactionpb_compaction_proto_goTypes, + DependencyIndexes: file_experiment_metastore_compactionpb_compaction_proto_depIdxs, + EnumInfos: file_experiment_metastore_compactionpb_compaction_proto_enumTypes, + MessageInfos: file_experiment_metastore_compactionpb_compaction_proto_msgTypes, + }.Build() + File_experiment_metastore_compactionpb_compaction_proto = out.File + file_experiment_metastore_compactionpb_compaction_proto_rawDesc = nil + file_experiment_metastore_compactionpb_compaction_proto_goTypes = nil + file_experiment_metastore_compactionpb_compaction_proto_depIdxs = nil +} diff --git a/pkg/experiment/metastore/compactionpb/compaction.proto b/pkg/experiment/metastore/compactionpb/compaction.proto new file mode 100644 index 0000000000..3884b27c36 --- /dev/null +++ b/pkg/experiment/metastore/compactionpb/compaction.proto @@ -0,0 +1,47 @@ +syntax = "proto3"; + +package compaction; + +message CompactionJob { + // Unique name of the job. + string name = 1; + // List of the input blocks. + repeated string blocks = 2; + // Compaction level (all blocks are the same) + uint32 compaction_level = 3; + // The index of the raft command that changed the status of the job. + // Used as a fencing token in conjunction with the lease_expires_at + // field to manage ownership of the compaction job. Any access to the + // job must be guarded by the check: current_index >= raft_log_index. + // If the check fails, the access should be denied. + // + // The index is updated every time the job is assigned to a worker. + uint64 raft_log_index = 4; + // Shard the blocks belong to. + uint32 shard = 5; + // Optional, empty for compaction level 0. + string tenant_id = 6; + CompactionStatus status = 7; + // The time the compaction job lease expires. If a lease is expired, the + // job is considered abandoned and can be picked up by another worker. + // The expiration check should be done by comparing the timestamp of + // the raft log entry (command that accesses the job) with the value of + // this field. + // + // The lease is extended every time the owner reports a status update. 
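+ // For example, a worker's status update (a raft command with index I and
+ // timestamp T) is honored only if I >= raft_log_index and T does not exceed
+ // lease_expires_at; otherwise the job is considered reassigned or abandoned.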
+ int64 lease_expires_at = 8; +} + +enum CompactionStatus { + COMPACTION_STATUS_UNSPECIFIED = 0; + COMPACTION_STATUS_IN_PROGRESS = 1; + COMPACTION_STATUS_SUCCESS = 2; + COMPACTION_STATUS_FAILURE = 3; +} + +message JobPreQueue { + uint32 compaction_level = 1; + uint32 shard = 2; + string tenant = 3; + repeated string blocks = 4; +} diff --git a/pkg/experiment/metastore/compactionpb/compaction_vtproto.pb.go b/pkg/experiment/metastore/compactionpb/compaction_vtproto.pb.go new file mode 100644 index 0000000000..c339e2c9d0 --- /dev/null +++ b/pkg/experiment/metastore/compactionpb/compaction_vtproto.pb.go @@ -0,0 +1,620 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.6.0 +// source: experiment/metastore/compactionpb/compaction.proto + +package compactionpb + +import ( + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *CompactionJob) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompactionJob) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CompactionJob) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.LeaseExpiresAt != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.LeaseExpiresAt)) + i-- + dAtA[i] = 0x40 + } + if m.Status != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x38 + } + if len(m.TenantId) > 0 { + i -= len(m.TenantId) + copy(dAtA[i:], m.TenantId) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TenantId))) + i-- + dAtA[i] = 0x32 + } + if m.Shard != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Shard)) + i-- + dAtA[i] = 0x28 + } + if m.RaftLogIndex != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RaftLogIndex)) + i-- + dAtA[i] = 0x20 + } + if m.CompactionLevel != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CompactionLevel)) + i-- + dAtA[i] = 0x18 + } + if len(m.Blocks) > 0 { + for iNdEx := len(m.Blocks) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Blocks[iNdEx]) + copy(dAtA[i:], m.Blocks[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Blocks[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *JobPreQueue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JobPreQueue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *JobPreQueue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + 
if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Blocks) > 0 { + for iNdEx := len(m.Blocks) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Blocks[iNdEx]) + copy(dAtA[i:], m.Blocks[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Blocks[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Tenant) > 0 { + i -= len(m.Tenant) + copy(dAtA[i:], m.Tenant) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Tenant))) + i-- + dAtA[i] = 0x1a + } + if m.Shard != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Shard)) + i-- + dAtA[i] = 0x10 + } + if m.CompactionLevel != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CompactionLevel)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CompactionJob) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Blocks) > 0 { + for _, s := range m.Blocks { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.CompactionLevel != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.CompactionLevel)) + } + if m.RaftLogIndex != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.RaftLogIndex)) + } + if m.Shard != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Shard)) + } + l = len(m.TenantId) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Status != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Status)) + } + if m.LeaseExpiresAt != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.LeaseExpiresAt)) + } + n += len(m.unknownFields) + return n +} + +func (m *JobPreQueue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CompactionLevel != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.CompactionLevel)) + } + if m.Shard != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Shard)) + } + l = len(m.Tenant) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Blocks) > 0 { + for _, s := range m.Blocks { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *CompactionJob) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompactionJob: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompactionJob: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex 
> l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Blocks = append(m.Blocks, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactionLevel", wireType) + } + m.CompactionLevel = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompactionLevel |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftLogIndex", wireType) + } + m.RaftLogIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftLogIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + m.Shard = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Shard |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TenantId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TenantId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= CompactionStatus(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseExpiresAt", wireType) + } + m.LeaseExpiresAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LeaseExpiresAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := 
protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobPreQueue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobPreQueue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobPreQueue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactionLevel", wireType) + } + m.CompactionLevel = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompactionLevel |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + m.Shard = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Shard |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tenant", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tenant = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Blocks = append(m.Blocks, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = 
append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/pkg/experiment/metastore/metastore.go b/pkg/experiment/metastore/metastore.go new file mode 100644 index 0000000000..7ccc51aa40 --- /dev/null +++ b/pkg/experiment/metastore/metastore.go @@ -0,0 +1,279 @@ +package metastore + +import ( + "context" + "flag" + "fmt" + "net" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/grafana/dskit/flagext" + "github.com/grafana/dskit/services" + "github.com/hashicorp/raft" + raftwal "github.com/hashicorp/raft-wal" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + + compactorv1 "github.com/grafana/pyroscope/api/gen/proto/go/compactor/v1" + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + "github.com/grafana/pyroscope/pkg/experiment/metastore/client" + "github.com/grafana/pyroscope/pkg/experiment/metastore/raftleader" + "github.com/grafana/pyroscope/pkg/util/health" +) + +const ( + snapshotsRetain = 3 + walCacheEntries = 512 + transportConnPoolSize = 10 + transportTimeout = 10 * time.Second + + raftTrailingLogs = 18 << 10 + raftSnapshotInterval = 180 * time.Second + raftSnapshotThreshold = 8 << 10 + + metastoreRaftLeaderHealthServiceName = "metastore.v1.MetastoreService.RaftLeader" +) + +type Config struct { + DataDir string `yaml:"data_dir"` + Raft RaftConfig `yaml:"raft"` +} + +type RaftConfig struct { + Dir string `yaml:"dir"` + + BootstrapPeers []string `yaml:"bootstrap_peers"` + BootstrapExpectPeers int `yaml:"bootstrap_expect_peers"` + + ServerID string `yaml:"server_id"` + BindAddress string `yaml:"bind_address"` + AdvertiseAddress string `yaml:"advertise_address"` + + ApplyTimeout time.Duration `yaml:"apply_timeout" doc:"hidden"` +} + +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + const prefix = "metastore." + f.StringVar(&cfg.DataDir, prefix+"data-dir", "./data-metastore/data", "") + cfg.Raft.RegisterFlags(f) +} + +func (cfg *RaftConfig) RegisterFlags(f *flag.FlagSet) { + const prefix = "metastore.raft." + f.StringVar(&cfg.Dir, prefix+"dir", "./data-metastore/raft", "") + f.Var((*flagext.StringSlice)(&cfg.BootstrapPeers), prefix+"bootstrap-peers", "") + f.IntVar(&cfg.BootstrapExpectPeers, prefix+"bootstrap-expect-peers", 1, "Expected number of peers including the local node.") + f.StringVar(&cfg.BindAddress, prefix+"bind-address", "localhost:9099", "") + f.StringVar(&cfg.ServerID, prefix+"server-id", "localhost:9099", "") + f.StringVar(&cfg.AdvertiseAddress, prefix+"advertise-address", "localhost:9099", "") + f.DurationVar(&cfg.ApplyTimeout, prefix+"apply-timeout", 5*time.Second, "") +} + +type Metastore struct { + service services.Service + metastorev1.MetastoreServiceServer + compactorv1.CompactionPlannerServer + + config Config + logger log.Logger + reg prometheus.Registerer + limits Limits + + // In-memory state. + state *metastoreState + + // Persistent state. + db *boltdb + + // Raft module. 
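+	// The single raft-wal instance backs both the log store and the stable
+	// store (the log store is additionally wrapped in an in-memory LogCache);
+	// snapshots go to a FileSnapshotStore, and the TCP transport carries raft
+	// RPCs between peers. See openRaftStore below for the wiring.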
+ wal *raftwal.WAL + snapshots *raft.FileSnapshotStore + transport *raft.NetworkTransport + raft *raft.Raft + leaderhealth *raftleader.HealthObserver + + logStore raft.LogStore + stableStore raft.StableStore + snapshotStore raft.SnapshotStore + + walDir string + + done chan struct{} + wg sync.WaitGroup + metrics *metastoreMetrics + client *metastoreclient.Client + readySince time.Time +} + +type Limits interface{} + +func New(config Config, limits Limits, logger log.Logger, reg prometheus.Registerer, hs health.Service, client *metastoreclient.Client) (*Metastore, error) { + metrics := newMetastoreMetrics(reg) + m := &Metastore{ + config: config, + logger: logger, + reg: reg, + limits: limits, + db: newDB(config, logger, metrics), + done: make(chan struct{}), + metrics: metrics, + client: client, + } + m.leaderhealth = raftleader.NewRaftLeaderHealthObserver(hs, logger, raftleader.NewMetrics(reg)) + m.state = newMetastoreState(logger, m.db, m.reg) + m.service = services.NewBasicService(m.starting, m.running, m.stopping) + return m, nil +} + +func (m *Metastore) Service() services.Service { return m.service } + +func (m *Metastore) Shutdown() error { + m.shutdownRaft() + m.db.shutdown() + return nil +} + +func (m *Metastore) starting(ctx context.Context) error { + if err := m.db.open(false); err != nil { + return fmt.Errorf("failed to initialize database: %w", err) + } + if err := m.initRaft(); err != nil { + return fmt.Errorf("failed to initialize raft: %w", err) + } + m.wg.Add(1) + go m.cleanupLoop() + return nil +} + +func (m *Metastore) stopping(_ error) error { + close(m.done) + m.wg.Wait() + return m.Shutdown() +} + +func (m *Metastore) running(ctx context.Context) error { + <-ctx.Done() + return nil +} + +func (m *Metastore) initRaft() (err error) { + defer func() { + if err != nil { + // If the initialization fails, initialized components + // should be de-initialized gracefully. 
+ m.shutdownRaft() + } + }() + + hasState, err := m.openRaftStore() + if err != nil { + return err + } + + addr, err := net.ResolveTCPAddr("tcp", m.config.Raft.AdvertiseAddress) + if err != nil { + return err + } + m.transport, err = raft.NewTCPTransport(m.config.Raft.BindAddress, addr, transportConnPoolSize, transportTimeout, os.Stderr) + if err != nil { + return err + } + + config := raft.DefaultConfig() + // TODO: Wrap gokit + // config.Logger + config.LogLevel = "debug" + config.TrailingLogs = raftTrailingLogs + config.SnapshotThreshold = raftSnapshotThreshold + config.SnapshotInterval = raftSnapshotInterval + config.LocalID = raft.ServerID(m.config.Raft.ServerID) + + fsm := newFSM(m.logger, m.db, m.state) + m.raft, err = raft.NewRaft(config, fsm, m.logStore, m.stableStore, m.snapshotStore, m.transport) + if err != nil { + return fmt.Errorf("starting raft node: %w", err) + } + + if !hasState { + _ = level.Warn(m.logger).Log("msg", "no existing state found, trying to bootstrap cluster") + if err = m.bootstrap(); err != nil { + return fmt.Errorf("failed to bootstrap cluster: %w", err) + } + } + + m.leaderhealth.Register(m.raft, metastoreRaftLeaderHealthServiceName) + return nil +} + +func (m *Metastore) openRaftStore() (hasState bool, err error) { + if err = m.createRaftDirs(); err != nil { + return false, err + } + m.wal, err = raftwal.Open(m.walDir) + if err != nil { + return false, fmt.Errorf("failed to open WAL: %w", err) + } + m.snapshots, err = raft.NewFileSnapshotStore(m.config.Raft.Dir, snapshotsRetain, os.Stderr) + if err != nil { + return false, fmt.Errorf("failed to open shapshot store: %w", err) + } + m.logStore = m.wal + m.logStore, _ = raft.NewLogCache(walCacheEntries, m.logStore) + m.stableStore = m.wal + m.snapshotStore = m.snapshots + if hasState, err = raft.HasExistingState(m.logStore, m.stableStore, m.snapshotStore); err != nil { + return hasState, fmt.Errorf("failed to check for existing state: %w", err) + } + return hasState, nil +} + +func (m *Metastore) createRaftDirs() (err error) { + m.walDir = filepath.Join(m.config.Raft.Dir, "wal") + if err = os.MkdirAll(m.walDir, 0755); err != nil { + return fmt.Errorf("WAL dir: %w", err) + } + if err = os.MkdirAll(m.config.Raft.Dir, 0755); err != nil { + return fmt.Errorf("snapshot directory: %w", err) + } + return nil +} + +func (m *Metastore) shutdownRaft() { + if m.raft != nil { + // If raft has been initialized, try to transfer leadership. + // Only after this we remove the leader health observer and + // shutdown the raft. + // There is a chance that client will still be trying to connect + // to this instance, therefore retrying is still required. + if err := m.raft.LeadershipTransfer().Error(); err != nil { + switch { + case errors.Is(err, raft.ErrNotLeader): + // Not a leader, nothing to do. + case strings.Contains(err.Error(), "cannot find peer"): + // It's likely that there's just one node in the cluster. 
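+				// There is no peer to hand leadership over to, so the error
+				// is deliberately ignored and we proceed with the shutdown.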
+ default: + _ = level.Error(m.logger).Log("msg", "failed to transfer leadership", "err", err) + } + } + m.leaderhealth.Deregister(m.raft, metastoreRaftLeaderHealthServiceName) + if err := m.raft.Shutdown().Error(); err != nil { + _ = level.Error(m.logger).Log("msg", "failed to shutdown raft", "err", err) + } + } + if m.transport != nil { + if err := m.transport.Close(); err != nil { + _ = level.Error(m.logger).Log("msg", "failed to close transport", "err", err) + } + } + if m.wal != nil { + if err := m.wal.Close(); err != nil { + _ = level.Error(m.logger).Log("msg", "failed to close WAL", "err", err) + } + } +} diff --git a/pkg/experiment/metastore/metastore_boltdb.go b/pkg/experiment/metastore/metastore_boltdb.go new file mode 100644 index 0000000000..6e7755a1b9 --- /dev/null +++ b/pkg/experiment/metastore/metastore_boltdb.go @@ -0,0 +1,288 @@ +package metastore + +import ( + "context" + "encoding/binary" + "fmt" + "io" + "os" + "path/filepath" + "runtime/pprof" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/hashicorp/raft" + "go.etcd.io/bbolt" +) + +const ( + boltDBFileName = "metastore.boltdb" + boltDBSnapshotName = "metastore_snapshot.boltdb" + boltDBInitialMmapSize = 2 << 30 +) + +type boltdb struct { + logger log.Logger + boltdb *bbolt.DB + config Config + path string + metrics *metastoreMetrics +} + +type snapshot struct { + logger log.Logger + tx *bbolt.Tx + metrics *metastoreMetrics +} + +func newDB(config Config, logger log.Logger, metrics *metastoreMetrics) *boltdb { + return &boltdb{ + logger: logger, + config: config, + metrics: metrics, + } +} + +func (db *boltdb) open(readOnly bool) (err error) { + defer func() { + if err != nil { + // If the initialization fails, initialized components + // should be de-initialized gracefully. + db.shutdown() + } + }() + + if err = os.MkdirAll(db.config.DataDir, 0755); err != nil { + return fmt.Errorf("db dir: %w", err) + } + + if db.path == "" { + db.path = filepath.Join(db.config.DataDir, boltDBFileName) + } + + opts := *bbolt.DefaultOptions + opts.ReadOnly = readOnly + opts.NoSync = true + opts.InitialMmapSize = boltDBInitialMmapSize + if db.boltdb, err = bbolt.Open(db.path, 0644, &opts); err != nil { + return fmt.Errorf("failed to open db: %w", err) + } + + if !readOnly { + err = db.boltdb.Update(func(tx *bbolt.Tx) error { + _, err := tx.CreateBucketIfNotExists(blockMetadataBucketNameBytes) + if err != nil { + return err + } + _, err = tx.CreateBucketIfNotExists(compactionJobBucketNameBytes) + return err + }) + if err != nil { + return fmt.Errorf("failed to create bucket: %w", err) + } + } + + return nil +} + +func (db *boltdb) shutdown() { + if db.boltdb != nil { + if err := db.boltdb.Close(); err != nil { + _ = level.Error(db.logger).Log("msg", "failed to close database", "err", err) + } + } +} + +func (db *boltdb) restore(snapshot io.Reader) error { + t1 := time.Now() + defer func() { + db.metrics.boltDBRestoreSnapshotDuration.Observe(time.Since(t1).Seconds()) + }() + // Snapshot is a full copy of the database, therefore we copy + // it on disk and use it instead of the current database. + path, err := db.copySnapshot(snapshot) + if err == nil { + // First check the snapshot. + restored := *db + restored.path = path + err = restored.open(true) + // Also check applied index. 
+ restored.shutdown() + } + if err != nil { + return fmt.Errorf("failed to restore snapshot: %w", err) + } + // Note that we do not keep the previous database: in case if the + // snapshot is corrupted, we should try another one. + return db.openSnapshot(path) +} + +func (db *boltdb) copySnapshot(snapshot io.Reader) (path string, err error) { + path = filepath.Join(db.config.DataDir, boltDBSnapshotName) + snapFile, err := os.Create(path) + if err != nil { + return "", err + } + _, err = io.Copy(snapFile, snapshot) + if syncErr := syncFD(snapFile); err == nil { + err = syncErr + } + return path, err +} + +func (db *boltdb) openSnapshot(path string) (err error) { + db.shutdown() + if err = os.Rename(path, db.path); err != nil { + return err + } + if err = syncPath(db.path); err != nil { + return err + } + return db.open(false) +} + +func syncPath(path string) (err error) { + d, err := os.Open(path) + if err != nil { + return err + } + return syncFD(d) +} + +func syncFD(f *os.File) (err error) { + err = f.Sync() + if closeErr := f.Close(); err == nil { + return closeErr + } + return err +} + +func (db *boltdb) createSnapshot() (*snapshot, error) { + s := snapshot{logger: db.logger, metrics: db.metrics} + tx, err := db.boltdb.Begin(false) + if err != nil { + return nil, fmt.Errorf("failed to open a transaction for snapshot: %w", err) + } + s.tx = tx + return &s, nil +} + +func (s *snapshot) Persist(sink raft.SnapshotSink) (err error) { + pprof.Do(context.Background(), pprof.Labels("metastore_op", "persist"), func(ctx context.Context) { + err = s.persist(sink) + }) + return err +} + +func (s *snapshot) persist(sink raft.SnapshotSink) error { + var err error + t1 := time.Now() + _ = s.logger.Log("msg", "persisting snapshot", "sink_id", sink.ID()) + defer func() { + s.metrics.boltDBPersistSnapshotDuration.Observe(time.Since(t1).Seconds()) + s.logger.Log("msg", "persisted snapshot", "sink_id", sink.ID(), "err", err, "duration", time.Since(t1)) + if err != nil { + _ = s.logger.Log("msg", "failed to persist snapshot", "err", err) + if err = sink.Cancel(); err != nil { + _ = s.logger.Log("msg", "failed to cancel snapshot sink", "err", err) + return + } + } + if err = sink.Close(); err != nil { + _ = s.logger.Log("msg", "failed to close sink", "err", err) + } + }() + _ = level.Info(s.logger).Log("msg", "persisting snapshot") + if _, err = s.tx.WriteTo(sink); err != nil { + _ = level.Error(s.logger).Log("msg", "failed to write snapshot", "err", err) + return err + } + return nil +} + +func (s *snapshot) Release() { + if s.tx != nil { + // This is an in-memory rollback, no error expected. 
+ _ = s.tx.Rollback() + } +} + +func getOrCreateSubBucket(parent *bbolt.Bucket, name []byte) (*bbolt.Bucket, error) { + bucket := parent.Bucket(name) + if bucket == nil { + return parent.CreateBucket(name) + } + return bucket, nil +} + +const blockMetadataBucketName = "block_metadata" +const compactionJobBucketName = "compaction_job" + +var blockMetadataBucketNameBytes = []byte(blockMetadataBucketName) +var compactionJobBucketNameBytes = []byte(compactionJobBucketName) + +func getBlockMetadataBucket(tx *bbolt.Tx) (*bbolt.Bucket, error) { + mdb := tx.Bucket(blockMetadataBucketNameBytes) + if mdb == nil { + return nil, bbolt.ErrBucketNotFound + } + return mdb, nil +} + +func updateBlockMetadataBucket(tx *bbolt.Tx, name []byte, fn func(*bbolt.Bucket) error) error { + mdb, err := getBlockMetadataBucket(tx) + if err != nil { + return err + } + bucket, err := getOrCreateSubBucket(mdb, name) + if err != nil { + return err + } + return fn(bucket) +} + +// Bucket |Key +// [4:shard]|[block_id] +func keyForBlockMeta(shard uint32, tenant string, id string) (bucket, key []byte) { + k := make([]byte, 4+len(tenant)) + binary.BigEndian.PutUint32(k, shard) + copy(k[4:], tenant) + return k, []byte(id) +} + +func parseBucketName(b []byte) (shard uint32, tenant string, ok bool) { + if len(b) >= 4 { + return binary.BigEndian.Uint32(b), string(b[4:]), true + } + return 0, "", false +} + +func updateCompactionJobBucket(tx *bbolt.Tx, name []byte, fn func(*bbolt.Bucket) error) error { + cdb, err := getCompactionJobBucket(tx) + if err != nil { + return err + } + bucket, err := getOrCreateSubBucket(cdb, name) + if err != nil { + return err + } + return fn(bucket) +} + +// Bucket |Key +// [4:shard]|[job_name] +func keyForCompactionJob(shard uint32, tenant string, jobName string) (bucket, key []byte) { + bucket = make([]byte, 4+len(tenant)) + binary.BigEndian.PutUint32(bucket, shard) + copy(bucket[4:], tenant) + return bucket, []byte(jobName) +} + +func getCompactionJobBucket(tx *bbolt.Tx) (*bbolt.Bucket, error) { + cdb := tx.Bucket(compactionJobBucketNameBytes) + if cdb == nil { + return nil, bbolt.ErrBucketNotFound + } + return cdb, nil +} diff --git a/pkg/experiment/metastore/metastore_bootstrap.go b/pkg/experiment/metastore/metastore_bootstrap.go new file mode 100644 index 0000000000..00db8fe508 --- /dev/null +++ b/pkg/experiment/metastore/metastore_bootstrap.go @@ -0,0 +1,129 @@ +package metastore + +import ( + "context" + "errors" + "fmt" + "net" + "slices" + "strings" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/grafana/dskit/dns" + "github.com/hashicorp/raft" +) + +func (m *Metastore) bootstrap() error { + peers, err := m.bootstrapPeers() + if err != nil { + return fmt.Errorf("failed to resolve peers: %w", err) + } + logger := log.With(m.logger, + "server_id", m.config.Raft.ServerID, + "advertise_address", m.config.Raft.AdvertiseAddress, + "peers", fmt.Sprint(peers)) + lastPeer := peers[len(peers)-1] + if raft.ServerAddress(m.config.Raft.AdvertiseAddress) != lastPeer.Address { + _ = level.Info(logger).Log("msg", "not the bootstrap node, skipping") + return nil + } + _ = level.Info(logger).Log("msg", "bootstrapping raft") + bootstrap := m.raft.BootstrapCluster(raft.Configuration{Servers: peers}) + if bootstrapErr := bootstrap.Error(); bootstrapErr != nil { + if !errors.Is(bootstrapErr, raft.ErrCantBootstrap) { + return fmt.Errorf("failed to bootstrap raft: %w", bootstrapErr) + } + } + return nil +} + +func (m *Metastore) bootstrapPeers() ([]raft.Server, error) { + // The 
peer list always includes the local node.
+	peers := make([]raft.Server, 0, len(m.config.Raft.BootstrapPeers)+1)
+	peers = append(peers, raft.Server{
+		Suffrage: raft.Voter,
+		ID:       raft.ServerID(m.config.Raft.ServerID),
+		Address:  raft.ServerAddress(m.config.Raft.AdvertiseAddress),
+	})
+	// Note that raft requires stable node IDs, therefore we're using
+	// the node FQDN:port for both purposes: as the identifier and as the
+	// address. This requires a DNS SRV record lookup without further
+	// resolution of A records (dnssrvnoa+).
+	//
+	// Alternatively, peers may be specified explicitly in the
+	// "{addr}/{node_id}" format, where node_id is the optional node
+	// identifier.
+	var resolve []string
+	for _, peer := range m.config.Raft.BootstrapPeers {
+		if strings.Contains(peer, "+") {
+			resolve = append(resolve, peer)
+		} else {
+			peers = append(peers, parsePeer(peer))
+		}
+	}
+	if len(resolve) > 0 {
+		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+		defer cancel()
+		prov := dns.NewProvider(m.logger, m.reg, dns.MiekgdnsResolverType)
+		if err := prov.Resolve(ctx, resolve); err != nil {
+			return nil, fmt.Errorf("failed to resolve bootstrap peers: %w", err)
+		}
+		resolvedPeers := prov.Addresses()
+		if len(resolvedPeers) == 0 {
+			// The local node is the only one in the cluster, but peers
+			// were supposed to be present. Stop here to avoid bootstrapping
+			// a single-node cluster.
+			return nil, fmt.Errorf("bootstrap peers can't be resolved")
+		}
+		for _, peer := range resolvedPeers {
+			peers = append(peers, raft.Server{
+				Suffrage: raft.Voter,
+				ID:       raft.ServerID(peer),
+				Address:  raft.ServerAddress(peer),
+			})
+		}
+	}
+	// Finally, we sort and deduplicate the peers: the last one
+	// is to bootstrap the cluster (see bootstrap above). If there are
+	// nodes with distinct IDs but the same address, bootstrapping will fail.
+	slices.SortFunc(peers, func(a, b raft.Server) int {
+		return strings.Compare(string(a.ID), string(b.ID))
+	})
+	peers = slices.CompactFunc(peers, func(a, b raft.Server) bool {
+		return a.ID == b.ID
+	})
+	if len(peers) != m.config.Raft.BootstrapExpectPeers {
+		return nil, fmt.Errorf("expected number of bootstrap peers not reached: got %d, expected %d",
+			len(peers), m.config.Raft.BootstrapExpectPeers)
+	}
+	return peers, nil
+}
+
+func parsePeer(raw string) raft.Server {
+	// The string may be "{addr}" or "{addr}/{node_id}".
+	parts := strings.SplitN(raw, "/", 2)
+	var addr string
+	var node string
+	if len(parts) == 2 {
+		addr = parts[0]
+		node = parts[1]
+	} else {
+		addr = raw
+	}
+	host, _, err := net.SplitHostPort(addr)
+	if err != nil {
+		// No port specified.
+		host = addr
+	}
+	if node == "" {
+		// No node_id specified.
+ node = host + } + return raft.Server{ + Suffrage: raft.Voter, + ID: raft.ServerID(node), + Address: raft.ServerAddress(addr), + } +} diff --git a/pkg/experiment/metastore/metastore_compaction_planner.go b/pkg/experiment/metastore/metastore_compaction_planner.go new file mode 100644 index 0000000000..5ea280f19a --- /dev/null +++ b/pkg/experiment/metastore/metastore_compaction_planner.go @@ -0,0 +1,258 @@ +package metastore + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/cespare/xxhash/v2" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "go.etcd.io/bbolt" + + compactorv1 "github.com/grafana/pyroscope/api/gen/proto/go/compactor/v1" + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + "github.com/grafana/pyroscope/pkg/experiment/metastore/compactionpb" +) + +const ( + jobPollInterval = 5 * time.Second + jobLeaseDuration = 3 * jobPollInterval +) + +var ( + // TODO aleks: for illustration purposes, to be moved externally + globalCompactionStrategy = compactionStrategy{ + levels: map[uint32]compactionLevelStrategy{ + 0: {maxBlocks: 20}, + }, + defaultStrategy: compactionLevelStrategy{ + maxBlocks: 10, + }, + maxCompactionLevel: 3, + // 0: 0.5 + // 1: 10s + // 2: 100s + // 3: 1000s // 16m40s + } +) + +type compactionStrategy struct { + levels map[uint32]compactionLevelStrategy + defaultStrategy compactionLevelStrategy + maxCompactionLevel uint32 +} + +type compactionLevelStrategy struct { + maxBlocks int + maxTotalSizeBytes uint64 +} + +func getStrategyForLevel(compactionLevel uint32) compactionLevelStrategy { + strategy, ok := globalCompactionStrategy.levels[compactionLevel] + if !ok { + strategy = globalCompactionStrategy.defaultStrategy + } + return strategy +} + +func (s compactionLevelStrategy) shouldCreateJob(blocks []string) bool { + // NB: Total block size does not reflect the actual size of the data + // to be read for compaction (at once) or queried. A better heuristic + // would be max tenant service size. 
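+	// For now the decision is based purely on the number of blocks queued
+	// at the level; maxTotalSizeBytes is declared but not yet consulted.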
+ return len(blocks) >= s.maxBlocks +} + +type jobPreQueue struct { + mu sync.Mutex + blocksByLevel map[uint32][]string +} + +func (m *Metastore) GetCompactionJobs(_ context.Context, req *compactorv1.GetCompactionRequest) (*compactorv1.GetCompactionResponse, error) { + return nil, nil +} + +func (m *metastoreState) tryCreateJob(block *metastorev1.BlockMeta, raftLogIndex uint64) *compactionpb.CompactionJob { + key := tenantShard{ + tenant: block.TenantId, + shard: block.Shard, + } + preQueue := m.getOrCreatePreQueue(key) + preQueue.mu.Lock() + defer preQueue.mu.Unlock() + + if block.CompactionLevel >= globalCompactionStrategy.maxCompactionLevel { + level.Info(m.logger).Log("msg", "skipping block at max compaction level", "block", block.Id, "compaction_level", block.CompactionLevel) + return nil + } + + queuedBlocks := append(preQueue.blocksByLevel[block.CompactionLevel], block.Id) + + level.Debug(m.logger).Log( + "msg", "adding block for compaction", + "block", block.Id, + "shard", block.Shard, + "tenant", block.TenantId, + "compaction_level", block.CompactionLevel, + "size", block.Size, + "queue_size", len(queuedBlocks), + "raft_log_index", raftLogIndex) + + strategy := getStrategyForLevel(block.CompactionLevel) + + var job *compactionpb.CompactionJob + if strategy.shouldCreateJob(queuedBlocks) { + blockIds := make([]string, 0, len(queuedBlocks)) + for _, b := range queuedBlocks { + blockIds = append(blockIds, b) + } + job = &compactionpb.CompactionJob{ + Name: fmt.Sprintf("L%d-S%d-%d", block.CompactionLevel, block.Shard, calculateHash(queuedBlocks)), + Blocks: blockIds, + Status: compactionpb.CompactionStatus_COMPACTION_STATUS_UNSPECIFIED, + Shard: block.Shard, + TenantId: block.TenantId, + CompactionLevel: block.CompactionLevel, + } + level.Info(m.logger).Log( + "msg", "created compaction job", + "job", job.Name, + "blocks", len(queuedBlocks), + "shard", block.Shard, + "tenant", block.TenantId, + "compaction_level", block.CompactionLevel) + } + return job +} + +func (m *metastoreState) addCompactionJob(job *compactionpb.CompactionJob) { + level.Debug(m.logger).Log("msg", "adding compaction job to priority queue", "job", job.Name) + if ok := m.compactionJobQueue.enqueue(job); !ok { + level.Warn(m.logger).Log("msg", "a compaction job with this name already exists", "job", job.Name) + return + } + + // reset the pre-queue for this level + key := tenantShard{ + tenant: job.TenantId, + shard: job.Shard, + } + preQueue := m.getOrCreatePreQueue(key) + preQueue.mu.Lock() + defer preQueue.mu.Unlock() + preQueue.blocksByLevel[job.CompactionLevel] = preQueue.blocksByLevel[job.CompactionLevel][:0] +} + +func (m *metastoreState) addBlockToCompactionJobQueue(block *metastorev1.BlockMeta) { + key := tenantShard{ + tenant: block.TenantId, + shard: block.Shard, + } + preQueue := m.getOrCreatePreQueue(key) + preQueue.mu.Lock() + defer preQueue.mu.Unlock() + + preQueue.blocksByLevel[block.CompactionLevel] = append(preQueue.blocksByLevel[block.CompactionLevel], block.Id) +} + +func calculateHash(blocks []string) uint64 { + b := make([]byte, 0, 1024) + for _, blk := range blocks { + b = append(b, blk...) 
+ } + return xxhash.Sum64(b) +} + +type compactionMetrics struct { + addedBlocks *prometheus.CounterVec + deletedBlocks *prometheus.CounterVec + addedJobs *prometheus.CounterVec + assignedJobs *prometheus.CounterVec + completedJobs *prometheus.CounterVec +} + +func newCompactionMetrics(reg prometheus.Registerer) *compactionMetrics { + m := &compactionMetrics{ + addedBlocks: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "pyroscope", + Name: "metastore_compaction_added_blocks_count", + Help: "The number of blocks added for compaction", + }, []string{"shard", "tenant", "level"}), + deletedBlocks: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "pyroscope", + Name: "metastore_compaction_deleted_blocks_count", + Help: "The number of blocks deleted as a result of compaction", + }, []string{"shard", "tenant", "level"}), + addedJobs: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "pyroscope", + Name: "metastore_compaction_added_jobs_count", + Help: "The number of created compaction jobs", + }, []string{"shard", "tenant", "level"}), + assignedJobs: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "pyroscope", + Name: "metastore_compaction_assigned_jobs_count", + Help: "The number of assigned compaction jobs", + }, []string{"shard", "tenant", "level"}), + completedJobs: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "pyroscope", + Name: "metastore_compaction_completed_jobs_count", + Help: "The number of completed compaction jobs", + }, []string{"shard", "tenant", "level"}), + } + if reg != nil { + reg.MustRegister( + m.addedBlocks, + m.deletedBlocks, + m.addedJobs, + m.assignedJobs, + m.completedJobs, + ) + } + return m +} + +func (m *metastoreState) consumeBlock(block *metastorev1.BlockMeta, tx *bbolt.Tx, raftLogIndex uint64) (err error, jobToAdd *compactionpb.CompactionJob, blockForQueue *metastorev1.BlockMeta) { + // create and store an optional compaction job + if job := m.tryCreateJob(block, raftLogIndex); job != nil { + level.Debug(m.logger).Log("msg", "persisting compaction job", "job", job.Name) + jobBucketName, jobKey := keyForCompactionJob(block.Shard, block.TenantId, job.Name) + err := updateCompactionJobBucket(tx, jobBucketName, func(bucket *bbolt.Bucket) error { + data, _ := job.MarshalVT() + return bucket.Put(jobKey, data) + }) + if err != nil { + return err, nil, nil + } + err = m.persistJobPreQueue(block.Shard, block.TenantId, block.CompactionLevel, []string{}, tx) + jobToAdd = job + } else { + key := tenantShard{ + tenant: block.TenantId, + shard: block.Shard, + } + queue := m.getOrCreatePreQueue(key).blocksByLevel[block.CompactionLevel] + queue = append(queue, block.Id) + err := m.persistJobPreQueue(block.Shard, block.TenantId, block.CompactionLevel, queue, tx) + if err != nil { + return err, nil, nil + } + blockForQueue = block + } + return err, jobToAdd, blockForQueue +} + +func (m *metastoreState) persistJobPreQueue(shard uint32, tenant string, compactionLevel uint32, queue []string, tx *bbolt.Tx) error { + jobBucketName, _ := keyForCompactionJob(shard, tenant, "") + preQueue := &compactionpb.JobPreQueue{ + CompactionLevel: compactionLevel, + Shard: shard, + Tenant: tenant, + Blocks: queue, + } + key := []byte(fmt.Sprintf("job-pre-queue-%d", compactionLevel)) + return updateCompactionJobBucket(tx, jobBucketName, func(bucket *bbolt.Bucket) error { + data, _ := preQueue.MarshalVT() + return bucket.Put(key, data) + }) +} diff --git a/pkg/experiment/metastore/metastore_compaction_queue.go 
b/pkg/experiment/metastore/metastore_compaction_queue.go new file mode 100644 index 0000000000..ceb0b5ce21 --- /dev/null +++ b/pkg/experiment/metastore/metastore_compaction_queue.go @@ -0,0 +1,210 @@ +package metastore + +import ( + "container/heap" + "slices" + "strings" + "sync" + + "github.com/grafana/pyroscope/pkg/experiment/metastore/compactionpb" +) + +// A priority queue for compaction jobs. Jobs are prioritized by the compaction +// level, and the deadline time. +// +// The queue is supposed to be used by the compaction planner to schedule jobs. +// +// Compaction workers own jobs while they are in progress. Ownership handling is +// implemented using lease deadlines and fencing tokens: +// https://martin.kleppmann.com/2016/02/08/how-to-do-distributed-locking.html + +type jobQueue struct { + mu sync.Mutex + jobs map[string]*jobQueueEntry + pq priorityQueue + + lease int64 +} + +// newJobQueue creates a new job queue with the given lease duration. +// +// Typically, callers should update jobs at the interval not exceeding +// the half of the lease duration. +func newJobQueue(lease int64) *jobQueue { + pq := make(priorityQueue, 0) + heap.Init(&pq) + return &jobQueue{ + jobs: make(map[string]*jobQueueEntry), + pq: pq, + lease: lease, + } +} + +type jobQueueEntry struct { + // The index of the job in the heap. + index int + // The original proto message. + *compactionpb.CompactionJob +} + +func (c *jobQueueEntry) less(x *jobQueueEntry) bool { + if c.Status != x.Status { + // Peek jobs in the "initial" (unspecified) state first. + return c.Status < x.Status + } + if c.LeaseExpiresAt != x.LeaseExpiresAt { + // Jobs with earlier deadlines should be at the top. + return c.LeaseExpiresAt < x.LeaseExpiresAt + } + // Compact lower level jobs first. + if c.CompactionLevel != x.CompactionLevel { + // Jobs with earlier deadlines should be at the top. + return c.CompactionLevel < x.CompactionLevel + } + return c.Name < x.Name +} + +func (q *jobQueue) dequeue(now int64, raftLogIndex uint64) *compactionpb.CompactionJob { + q.mu.Lock() + defer q.mu.Unlock() + for q.pq.Len() > 0 { + job := q.pq[0] + if job.Status == compactionpb.CompactionStatus_COMPACTION_STATUS_IN_PROGRESS && + now <= job.LeaseExpiresAt { + // If the top job is in progress and not expired, stop checking further + return nil + } + // Actually remove it from the heap, update and push it back. + heap.Pop(&q.pq) + job.LeaseExpiresAt = q.getNewDeadline(now) + job.Status = compactionpb.CompactionStatus_COMPACTION_STATUS_IN_PROGRESS + // If job.status is "in progress", the ownership of the job is being revoked. + job.RaftLogIndex = raftLogIndex + heap.Push(&q.pq, job) + return job.CompactionJob + } + return nil +} + +func (q *jobQueue) update(name string, now int64, raftLogIndex uint64) bool { + q.mu.Lock() + defer q.mu.Unlock() + if job, exists := q.jobs[name]; exists { + if job.RaftLogIndex > raftLogIndex { + return false + } + job.LeaseExpiresAt = q.getNewDeadline(now) + job.Status = compactionpb.CompactionStatus_COMPACTION_STATUS_IN_PROGRESS + // De-prioritize the job, as the deadline has been postponed. 
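+		// heap.Fix re-establishes the ordering after LeaseExpiresAt was
+		// changed in place, which is cheaper than Remove followed by Push.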
+ heap.Fix(&q.pq, job.index) + return true + } + return false +} + +func (q *jobQueue) getNewDeadline(now int64) int64 { + return now + q.lease +} + +func (q *jobQueue) isOwner(name string, raftLogIndex uint64) bool { + q.mu.Lock() + defer q.mu.Unlock() + if job, exists := q.jobs[name]; exists { + if job.RaftLogIndex > raftLogIndex { + return false + } + } + return true +} + +func (q *jobQueue) evict(name string, raftLogIndex uint64) bool { + q.mu.Lock() + defer q.mu.Unlock() + if job, exists := q.jobs[name]; exists { + if job.RaftLogIndex > raftLogIndex { + return false + } + delete(q.jobs, name) + heap.Remove(&q.pq, job.index) + } + return true +} + +func (q *jobQueue) enqueue(job *compactionpb.CompactionJob) bool { + q.mu.Lock() + defer q.mu.Unlock() + if _, exists := q.jobs[job.Name]; exists { + return false + } + j := &jobQueueEntry{CompactionJob: job} + q.jobs[job.Name] = j + heap.Push(&q.pq, j) + return true +} + +func (q *jobQueue) putJob(job *compactionpb.CompactionJob) { + q.jobs[job.Name] = &jobQueueEntry{CompactionJob: job} +} + +func (q *jobQueue) rebuild() { + q.pq = slices.Grow(q.pq[0:], len(q.jobs)) + for _, job := range q.jobs { + q.pq = append(q.pq, job) + } + heap.Init(&q.pq) +} + +func (q *jobQueue) stats() (int, string, string, string, string) { + q.mu.Lock() + defer q.mu.Unlock() + + newJobs := make([]string, 0) + inProgressJobs := make([]string, 0) + completedJobs := make([]string, 0) + failedJobs := make([]string, 0) + for _, job := range q.jobs { + switch job.Status { + case compactionpb.CompactionStatus_COMPACTION_STATUS_UNSPECIFIED: + newJobs = append(newJobs, job.Name) + case compactionpb.CompactionStatus_COMPACTION_STATUS_IN_PROGRESS: + inProgressJobs = append(inProgressJobs, job.Name) + case compactionpb.CompactionStatus_COMPACTION_STATUS_SUCCESS: + completedJobs = append(completedJobs, job.Name) + case compactionpb.CompactionStatus_COMPACTION_STATUS_FAILURE: + failedJobs = append(failedJobs, job.Name) + } + } + return len(q.jobs), strings.Join(newJobs, ", "), strings.Join(inProgressJobs, ", "), strings.Join(completedJobs, ", "), strings.Join(failedJobs, ", ") +} + +// TODO(kolesnikovae): container/heap is not very efficient, +// consider implementing own heap, specific to the case. + +type priorityQueue []*jobQueueEntry + +func (pq priorityQueue) Len() int { return len(pq) } + +func (pq priorityQueue) Less(i, j int) bool { return pq[i].less(pq[j]) } + +func (pq priorityQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] + pq[i].index = i + pq[j].index = j +} + +func (pq *priorityQueue) Push(x interface{}) { + n := len(*pq) + job := x.(*jobQueueEntry) + job.index = n + *pq = append(*pq, job) +} + +func (pq *priorityQueue) Pop() interface{} { + old := *pq + n := len(old) + job := old[n-1] + old[n-1] = nil + job.index = -1 + *pq = old[0 : n-1] + return job +} diff --git a/pkg/experiment/metastore/metastore_compaction_queue_test.go b/pkg/experiment/metastore/metastore_compaction_queue_test.go new file mode 100644 index 0000000000..09f1af46dd --- /dev/null +++ b/pkg/experiment/metastore/metastore_compaction_queue_test.go @@ -0,0 +1,71 @@ +package metastore + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/grafana/pyroscope/pkg/experiment/metastore/compactionpb" +) + +func Test_compactionJobQueue(t *testing.T) { + var now int64 // Timestamp of the raft command. + lease := int64(10) // Job lease duration. 
+ q := newJobQueue(lease) + + assert.True(t, q.enqueue(&compactionpb.CompactionJob{ + Name: "job1", + RaftLogIndex: 1, + CompactionLevel: 0, + })) + assert.True(t, q.enqueue(&compactionpb.CompactionJob{ + Name: "job2", + RaftLogIndex: 2, + CompactionLevel: 1, + })) + assert.True(t, q.enqueue(&compactionpb.CompactionJob{ + Name: "job3", + RaftLogIndex: 3, + CompactionLevel: 0, + })) + + // Token here is the raft command index. + assertJob(t, q.dequeue(now, 4), "job1", 4) // L0 + assertJob(t, q.dequeue(now, 5), "job3", 5) // L0 + assertJob(t, q.dequeue(now, 6), "job2", 6) // L1 + require.Nil(t, q.dequeue(now, 7)) // No jobs left. + require.Nil(t, q.dequeue(now, 8)) // No jobs left. + + // Time has passed. Updating the jobs: all but job1. + now += lease / 2 + assert.True(t, q.update("job3", now, 9)) // Postpone the deadline. + assert.True(t, q.update("job2", now, 10)) // Postpone the deadline. + require.Nil(t, q.dequeue(now, 11)) // No jobs left. + + // Time has passed: the initial lease has expired. + now += lease/2 + 1 + assertJob(t, q.dequeue(now, 12), "job1", 12) // Seizing ownership of expired job. + require.Nil(t, q.dequeue(now, 13)) // No jobs available yet. + + // Owner of the job1 awakes and tries to update the job. + assert.False(t, q.update("job1", now, 4)) // Postpone the deadline; stale owner is rejected. + assert.True(t, q.update("job1", now, 12)) // Postpone the deadline; new owner succeeds. + + assert.False(t, q.evict("job1", 4)) // Evicting the job; stale owner is rejected. + assert.True(t, q.evict("job1", 12)) // Postpone the deadline; new owner succeeds. + + // Jobs are evicted in the end, regardless of the status. + // We ignore expired lease, as long as nobody else has taken the job. + assert.True(t, q.evict("job2", 10)) + assert.True(t, q.evict("job3", 9)) + + // No jobs left. + require.Nil(t, q.dequeue(now, 14)) +} + +func assertJob(t *testing.T, j *compactionpb.CompactionJob, name string, commitIndex uint64) { + require.NotNil(t, j) + assert.Equal(t, name, j.Name) + assert.Equal(t, commitIndex, j.RaftLogIndex) +} diff --git a/pkg/experiment/metastore/metastore_fsm.go b/pkg/experiment/metastore/metastore_fsm.go new file mode 100644 index 0000000000..5d7c2caba5 --- /dev/null +++ b/pkg/experiment/metastore/metastore_fsm.go @@ -0,0 +1,218 @@ +package metastore + +import ( + "fmt" + "io" + "reflect" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/hashicorp/raft" + "google.golang.org/protobuf/proto" + + compactorv1 "github.com/grafana/pyroscope/api/gen/proto/go/compactor/v1" + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + "github.com/grafana/pyroscope/pkg/experiment/metastore/raftlogpb" + "github.com/grafana/pyroscope/pkg/util" +) + +// The map is used to determine the type of the given command, +// when the request is converted to a Raft log entry. +var commandTypeMap = map[reflect.Type]raftlogpb.CommandType{ + reflect.TypeOf(new(metastorev1.AddBlockRequest)): raftlogpb.CommandType_COMMAND_TYPE_ADD_BLOCK, + reflect.TypeOf(new(raftlogpb.TruncateCommand)): raftlogpb.CommandType_COMMAND_TYPE_TRUNCATE, + reflect.TypeOf(new(compactorv1.PollCompactionJobsRequest)): raftlogpb.CommandType_COMMAND_TYPE_POLL_COMPACTION_JOBS_STATUS, +} + +// The map is used to determine the handler for the given command, +// read from the Raft log entry. 
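+// Each handler unmarshals the entry payload into its request type and applies
+// it to the local metastore state; the result is wrapped in fsmResponse and
+// returned to the FSM.Apply caller.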
+var commandHandlers = map[raftlogpb.CommandType]commandHandler{
+	raftlogpb.CommandType_COMMAND_TYPE_ADD_BLOCK: func(fsm *FSM, cmd *raft.Log, raw []byte) fsmResponse {
+		return handleCommand(raw, cmd, fsm.state.applyAddBlock)
+	},
+	raftlogpb.CommandType_COMMAND_TYPE_TRUNCATE: func(fsm *FSM, cmd *raft.Log, raw []byte) fsmResponse {
+		return handleCommand(raw, cmd, fsm.state.applyTruncate)
+	},
+	raftlogpb.CommandType_COMMAND_TYPE_POLL_COMPACTION_JOBS_STATUS: func(fsm *FSM, cmd *raft.Log, raw []byte) fsmResponse {
+		return handleCommand(raw, cmd, fsm.state.applyPollCompactionJobs)
+	},
+}
+
+// TODO: Add registration functions.
+
+type FSM struct {
+	logger log.Logger
+	state  *metastoreState
+	db     *boltdb
+}
+
+type fsmResponse struct {
+	msg proto.Message
+	err error
+}
+
+type fsmError struct {
+	log *raft.Log
+	err error
+}
+
+func errResponse(l *raft.Log, err error) fsmResponse {
+	return fsmResponse{err: &fsmError{log: l, err: err}}
+}
+
+func (e *fsmError) Error() string {
+	if e.err == nil {
+		return ""
+	}
+	if e.log == nil {
+		return e.err.Error()
+	}
+	return fmt.Sprintf("index: %d; term: %d; appended_at: %v; error: %v",
+		e.log.Index, e.log.Term, e.log.AppendedAt, e.err)
+}
+
+type commandHandler func(*FSM, *raft.Log, []byte) fsmResponse
+
+// TODO(kolesnikovae): replace commandCall with interface:
+// type command[Req, Resp proto.Message] interface {
+//     apply(Req) (Resp, error)
+// }
+
+type commandCall[Req, Resp proto.Message] func(*raft.Log, Req) (Resp, error)
+
+func newFSM(logger log.Logger, db *boltdb, state *metastoreState) *FSM {
+	return &FSM{
+		logger: logger,
+		db:     db,
+		state:  state,
+	}
+}
+
+// TODO(kolesnikovae): Implement BatchingFSM.
+
+func (fsm *FSM) Apply(l *raft.Log) interface{} {
+	switch l.Type {
+	case raft.LogNoop:
+	case raft.LogBarrier:
+	case raft.LogConfiguration:
+	case raft.LogCommand:
+		return fsm.applyCommand(l)
+	default:
+		_ = level.Warn(fsm.logger).Log("msg", "unexpected log entry, ignoring", "type", l.Type.String())
+	}
+	return nil
+}
+
+// applyCommand receives a raw command from the raft log (FSM.Apply),
+// and calls the corresponding handler on the _local_ FSM, based on
+// the command type.
+func (fsm *FSM) applyCommand(l *raft.Log) interface{} {
+	t1 := time.Now()
+	defer func() {
+		fsm.db.metrics.fsmApplyCommandHandlerDuration.Observe(time.Since(t1).Seconds())
+	}()
+	var e raftlogpb.RaftLogEntry
+	if err := proto.Unmarshal(l.Data, &e); err != nil {
+		return errResponse(l, err)
+	}
+	if handler, ok := commandHandlers[e.Type]; ok {
+		return handler(fsm, l, e.Payload)
+	}
+	return errResponse(l, fmt.Errorf("unknown command type: %v", e.Type.String()))
+}
+
+// handleCommand receives the payload of the command from the raft log (FSM.Apply),
+// and the function that processes the command. The returned response is wrapped in
+// fsmResponse and is available to the FSM.Apply caller.
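+// The request value is instantiated by reflection (see newProto below), and
+// panics raised by the handler are recovered and returned as errors, so a
+// malformed command cannot crash the FSM.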
+func handleCommand[Req, Resp proto.Message](raw []byte, cmd *raft.Log, call commandCall[Req, Resp]) fsmResponse { + var resp fsmResponse + defer func() { + if r := recover(); r != nil { + resp.err = util.PanicError(r) + } + }() + req := newProto[Req]() + if resp.err = proto.Unmarshal(raw, req); resp.err != nil { + return resp + } + resp.msg, resp.err = call(cmd, req) + return resp +} + +func newProto[T proto.Message]() T { + var msg T + msgType := reflect.TypeOf(msg).Elem() + return reflect.New(msgType).Interface().(T) +} + +func (fsm *FSM) Snapshot() (raft.FSMSnapshot, error) { + // Snapshot should only capture a pointer to the state, and any + // expensive IO should happen as part of FSMSnapshot.Persist. + return fsm.db.createSnapshot() +} + +func (fsm *FSM) Restore(snapshot io.ReadCloser) error { + t1 := time.Now() + _ = level.Info(fsm.logger).Log("msg", "restoring snapshot") + defer func() { + _ = snapshot.Close() + fsm.db.metrics.fsmRestoreSnapshotDuration.Observe(time.Since(t1).Seconds()) + }() + if err := fsm.db.restore(snapshot); err != nil { + return fmt.Errorf("failed to restore from snapshot: %w", err) + } + if err := fsm.state.restore(fsm.db); err != nil { + return fmt.Errorf("failed to restore state: %w", err) + } + return nil +} + +// applyCommand issues the command to the raft log based on the request type, +// and returns the response of FSM.Apply. +func applyCommand[Req, Resp proto.Message]( + log *raft.Raft, + req Req, + timeout time.Duration, +) ( + future raft.ApplyFuture, + resp Resp, + err error, +) { + defer func() { + if r := recover(); r != nil { + err = util.PanicError(r) + } + }() + raw, err := marshallRequest(req) + if err != nil { + return nil, resp, err + } + future = log.Apply(raw, timeout) + if err = future.Error(); err != nil { + return nil, resp, err + } + fsmResp := future.Response().(fsmResponse) + if fsmResp.msg != nil { + resp, _ = fsmResp.msg.(Resp) + } + return future, resp, fsmResp.err +} + +func marshallRequest[Req proto.Message](req Req) ([]byte, error) { + cmdType, ok := commandTypeMap[reflect.TypeOf(req)] + if !ok { + return nil, fmt.Errorf("unknown command type: %T", req) + } + var err error + entry := raftlogpb.RaftLogEntry{Type: cmdType} + entry.Payload, err = proto.Marshal(req) + if err != nil { + return nil, err + } + raw, err := proto.Marshal(&entry) + if err != nil { + return nil, err + } + return raw, nil +} diff --git a/pkg/experiment/metastore/metastore_hack.go b/pkg/experiment/metastore/metastore_hack.go new file mode 100644 index 0000000000..8ad66d3efb --- /dev/null +++ b/pkg/experiment/metastore/metastore_hack.go @@ -0,0 +1,100 @@ +package metastore + +import ( + "sync" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/hashicorp/raft" + "github.com/oklog/ulid" + "google.golang.org/protobuf/types/known/anypb" + + "github.com/grafana/pyroscope/pkg/experiment/metastore/raftlogpb" +) + +// FIXME(kolesnikovae): +// Remove once compaction is implemented. +// Or use index instead of the timestamp. 
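+//
+// The cleanup loop below runs on a ticker and, on the raft leader only,
+// applies a TruncateCommand through the raft log, so that every replica
+// drops segments older than 12 hours.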
+ +func (m *Metastore) cleanupLoop() { + t := time.NewTicker(10 * time.Minute) + defer func() { + t.Stop() + m.wg.Done() + }() + for { + select { + case <-m.done: + return + case <-t.C: + if m.raft.State() != raft.Leader { + continue + } + timestamp := uint64(time.Now().Add(-12 * time.Hour).UnixMilli()) + req := &raftlogpb.TruncateCommand{Timestamp: timestamp} + _, _, err := applyCommand[*raftlogpb.TruncateCommand, *anypb.Any](m.raft, req, m.config.Raft.ApplyTimeout) + if err != nil { + _ = level.Error(m.logger).Log("msg", "failed to apply truncate command", "err", err) + } + } + } +} + +func (m *metastoreState) applyTruncate(_ *raft.Log, request *raftlogpb.TruncateCommand) (*anypb.Any, error) { + m.shardsMutex.Lock() + var g sync.WaitGroup + g.Add(len(m.shards)) + for shardID, shard := range m.shards { + go truncateSegmentsBefore(m.db, m.logger, &g, shardID, shard, request.Timestamp) + } + m.shardsMutex.Unlock() + g.Wait() + return &anypb.Any{}, nil +} + +func truncateSegmentsBefore( + db *boltdb, + log log.Logger, + wg *sync.WaitGroup, + shardID uint32, + shard *metastoreShard, + t uint64, +) { + defer wg.Done() + var c int + tx, err := db.boltdb.Begin(true) + if err != nil { + _ = level.Error(log).Log("msg", "failed to start transaction", "err", err) + return + } + defer func() { + if err = tx.Commit(); err != nil { + _ = level.Error(log).Log("msg", "failed to commit transaction", "err", err) + return + } + _ = level.Info(log).Log("msg", "stale segments truncated", "segments", c) + }() + + bucket, err := getBlockMetadataBucket(tx) + if err != nil { + _ = level.Error(log).Log("msg", "failed to get metadata bucket", "err", err) + return + } + shardBucket, _ := keyForBlockMeta(shardID, "", "") + bucket = bucket.Bucket(shardBucket) + + shard.segmentsMutex.Lock() + defer shard.segmentsMutex.Unlock() + + for k, segment := range shard.segments { + if ulid.MustParse(segment.Id).Time() < t { + if err = bucket.Delete([]byte(segment.Id)); err != nil { + _ = level.Error(log).Log("msg", "failed to delete stale segments", "err", err) + return + } + delete(shard.segments, k) + c++ + } + } +} diff --git a/pkg/experiment/metastore/metastore_metrics.go b/pkg/experiment/metastore/metastore_metrics.go new file mode 100644 index 0000000000..e0f404a1b0 --- /dev/null +++ b/pkg/experiment/metastore/metastore_metrics.go @@ -0,0 +1,54 @@ +package metastore + +import ( + "github.com/grafana/dskit/instrument" + "github.com/prometheus/client_golang/prometheus" +) + +type metastoreMetrics struct { + boltDBPersistSnapshotDuration prometheus.Histogram + boltDBRestoreSnapshotDuration prometheus.Histogram + fsmRestoreSnapshotDuration prometheus.Histogram + fsmApplyCommandHandlerDuration prometheus.Histogram + raftAddBlockDuration prometheus.Histogram +} + +func newMetastoreMetrics(reg prometheus.Registerer) *metastoreMetrics { + var dataTimingBuckets = prometheus.ExponentialBucketsRange(0.01, 20, 48) + m := &metastoreMetrics{ + boltDBPersistSnapshotDuration: prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "pyroscope", + Name: "metastore_boltdb_persist_snapshot_duration_seconds", + //Buckets: dataTimingBuckets, + Buckets: instrument.DefBuckets, + }), + boltDBRestoreSnapshotDuration: prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "pyroscope", + Name: "metastore_boltdb_restore_snapshot_duration_seconds", + Buckets: dataTimingBuckets, + }), + fsmRestoreSnapshotDuration: prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "pyroscope", + Name: 
"metastore_fsm_restore_snapshot_duration_seconds", + Buckets: dataTimingBuckets, + }), + fsmApplyCommandHandlerDuration: prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "pyroscope", + Name: "metastore_fsm_apply_command_handler_duration_seconds", + Buckets: dataTimingBuckets, + }), + raftAddBlockDuration: prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "pyroscope", + Name: "metastore_raft_add_block_duration_seconds", + Buckets: dataTimingBuckets, + }), + } + if reg != nil { + reg.MustRegister(m.boltDBPersistSnapshotDuration) + reg.MustRegister(m.boltDBRestoreSnapshotDuration) + reg.MustRegister(m.fsmRestoreSnapshotDuration) + reg.MustRegister(m.fsmApplyCommandHandlerDuration) + reg.MustRegister(m.raftAddBlockDuration) + } + return m +} diff --git a/pkg/experiment/metastore/metastore_readindex.go b/pkg/experiment/metastore/metastore_readindex.go new file mode 100644 index 0000000000..d031eadf56 --- /dev/null +++ b/pkg/experiment/metastore/metastore_readindex.go @@ -0,0 +1,132 @@ +package metastore + +import ( + "context" + "fmt" + "time" + + "github.com/go-kit/log" + "github.com/google/uuid" + + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" +) + +var tcheckFreq = 10 * time.Millisecond + +func (m *Metastore) ReadIndex(ctx context.Context, req *metastorev1.ReadIndexRequest) (*metastorev1.ReadIndexResponse, error) { + //todo + //If the leader has not yet marked an entry from its current term committed, it waits until it + //has done so. The Leader Completeness Property guarantees that a leader has all committed + //entries, but at the start of its term, it may not know which those are. To find out, it needs to + //commit an entry from its term. Raft handles this by having each leader commit a blank no-op + //entry into the log at the start of its term. As soon as this no-op entry is committed, the leader’s + //commit index will be at least as large as any other servers’ during its term. 
+ t := time.Now() + readIndex := m.raft.CommitIndex() + raftLogger := func() log.Logger { + return log.With(m.logger, "component", "raft_debug", + "request_id", req.DebugRequestId, + "op", "ReadIndex", + "read_index", readIndex, + "applied_index", m.raft.AppliedIndex(), + "commit_index", m.raft.CommitIndex(), + "last_index", m.raft.LastIndex(), + "duration", time.Since(t), + ) + } + + raftLogger().Log("msg", "verify_leader") + if err := m.raft.VerifyLeader().Error(); err != nil { + return new(metastorev1.ReadIndexResponse), err + } + + tcheck := time.NewTicker(tcheckFreq) + defer tcheck.Stop() + timeout := time.NewTimer(5 * time.Second) + defer timeout.Stop() + + for { + select { + case <-tcheck.C: + appliedIndex := m.raft.AppliedIndex() + raftLogger().Log("msg", "tick") + if appliedIndex >= readIndex { + raftLogger().Log("msg", "caught up") + return &metastorev1.ReadIndexResponse{ReadIndex: readIndex}, nil + } + continue + case <-timeout.C: + raftLogger().Log("err", "timeout") + return new(metastorev1.ReadIndexResponse), fmt.Errorf("timeout") + case <-ctx.Done(): + raftLogger().Log("err", "context canceled") + return new(metastorev1.ReadIndexResponse), fmt.Errorf("canceled %w", ctx.Err()) + } + } +} + +func (m *Metastore) CheckReady(ctx context.Context) (err error) { + const ( + ready = "ready" + notReady = "not_ready" + status = "status" + ) + debugRequestId := uuid.Must(uuid.NewRandom()).String() //todo delete + readIndex := uint64(0) + t := time.Now() + raftLogger := func() log.Logger { + return log.With(m.logger, "component", "raft_debug", + "request_id", debugRequestId, + "op", "CheckReady", + "read_index", readIndex, + "applied_index", m.raft.AppliedIndex(), + "commit_index", m.raft.CommitIndex(), + "last_index", m.raft.LastIndex(), + "duration", time.Since(t), + ) + } + raftLogger().Log("msg", "check") + req := new(metastorev1.ReadIndexRequest) + req.DebugRequestId = debugRequestId + res, err := m.client.ReadIndex(ctx, req) + if err != nil { + err = fmt.Errorf("failed to get read index: %w", err) + raftLogger().Log(status, notReady, "err", err) + return err + } + readIndex = res.ReadIndex + + tcheck := time.NewTicker(tcheckFreq) + defer tcheck.Stop() + timeout := time.NewTimer(5 * time.Second) + defer timeout.Stop() + + for { + select { + case <-tcheck.C: + commitIndex := m.raft.CommitIndex() + raftLogger().Log("msg", "tick") + if commitIndex >= res.ReadIndex { + if m.readySince.IsZero() { + m.readySince = time.Now() + } + minReadyTime := 30 * time.Second + if time.Since(m.readySince) < minReadyTime { + err := fmt.Errorf("waiting for %v after being ready", minReadyTime) + raftLogger().Log(status, notReady, "err", err) + return err + } + + raftLogger().Log(status, ready) + return nil + } + continue + case <-timeout.C: + raftLogger().Log(status, notReady, "err", "timeout") + return fmt.Errorf("metastore ready check timeout") + case <-ctx.Done(): + raftLogger().Log(status, notReady, "err", "context canceled") + return fmt.Errorf("metastore check context canceled %w", ctx.Err()) + } + } +} diff --git a/pkg/experiment/metastore/metastore_state.go b/pkg/experiment/metastore/metastore_state.go new file mode 100644 index 0000000000..3994f01f56 --- /dev/null +++ b/pkg/experiment/metastore/metastore_state.go @@ -0,0 +1,228 @@ +package metastore + +import ( + "errors" + "fmt" + "strings" + "sync" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "go.etcd.io/bbolt" + + metastorev1 
"github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + "github.com/grafana/pyroscope/pkg/experiment/metastore/compactionpb" +) + +const ( + compactionBucketJobPreQueuePrefix = "job-pre-queue" +) + +type tenantShard struct { + tenant string + shard uint32 +} + +type metastoreState struct { + logger log.Logger + compactionMetrics *compactionMetrics + + shardsMutex sync.Mutex + shards map[uint32]*metastoreShard + + compactionPlansMutex sync.Mutex + preCompactionQueues map[tenantShard]*jobPreQueue + compactionJobQueue *jobQueue + + db *boltdb +} + +type metastoreShard struct { + segmentsMutex sync.Mutex + segments map[string]*metastorev1.BlockMeta +} + +func newMetastoreState(logger log.Logger, db *boltdb, reg prometheus.Registerer) *metastoreState { + return &metastoreState{ + logger: logger, + shards: make(map[uint32]*metastoreShard), + db: db, + preCompactionQueues: make(map[tenantShard]*jobPreQueue), + compactionJobQueue: newJobQueue(jobLeaseDuration.Nanoseconds()), + compactionMetrics: newCompactionMetrics(reg), + } +} + +func (m *metastoreState) reset(db *boltdb) { + m.shardsMutex.Lock() + clear(m.shards) + clear(m.preCompactionQueues) + m.compactionJobQueue = newJobQueue(jobLeaseDuration.Nanoseconds()) + m.db = db + m.shardsMutex.Unlock() +} + +func (m *metastoreState) getOrCreateShard(shardID uint32) *metastoreShard { + m.shardsMutex.Lock() + defer m.shardsMutex.Unlock() + if shard, ok := m.shards[shardID]; ok { + return shard + } + shard := newMetastoreShard() + m.shards[shardID] = shard + return shard +} + +func (m *metastoreState) restore(db *boltdb) error { + m.reset(db) + return db.boltdb.View(func(tx *bbolt.Tx) error { + if err := m.restoreBlockMetadata(tx); err != nil { + return fmt.Errorf("failed to restore metadata entries: %w", err) + } + return m.restoreCompactionPlan(tx) + }) +} + +func (m *metastoreState) restoreBlockMetadata(tx *bbolt.Tx) error { + mdb, err := getBlockMetadataBucket(tx) + switch { + case err == nil: + case errors.Is(err, bbolt.ErrBucketNotFound): + return nil + default: + return err + } + // List shards in the block_metadata bucket: + // block_metadata/[{shard_id}]/[block_id] + // TODO(kolesnikovae): Load concurrently. 
+ return mdb.ForEachBucket(func(name []byte) error { + shardID, _, ok := parseBucketName(name) + if !ok { + _ = level.Error(m.logger).Log("msg", "malformed bucket name", "name", string(name)) + return nil + } + shard := m.getOrCreateShard(shardID) + return shard.loadSegments(mdb.Bucket(name)) + }) +} + +func (m *metastoreState) restoreCompactionPlan(tx *bbolt.Tx) error { + cdb, err := getCompactionJobBucket(tx) + switch { + case err == nil: + case errors.Is(err, bbolt.ErrBucketNotFound): + return nil + default: + return err + } + return cdb.ForEachBucket(func(name []byte) error { + shard, tenant, ok := parseBucketName(name) + if !ok { + _ = level.Error(m.logger).Log("msg", "malformed bucket name", "name", string(name)) + return nil + } + key := tenantShard{ + tenant: tenant, + shard: shard, + } + preQueue := m.getOrCreatePreQueue(key) + + return m.loadCompactionPlan(cdb.Bucket(name), preQueue) + }) + +} + +func (m *metastoreState) getOrCreatePreQueue(key tenantShard) *jobPreQueue { + m.compactionPlansMutex.Lock() + defer m.compactionPlansMutex.Unlock() + + if preQueue, ok := m.preCompactionQueues[key]; ok { + return preQueue + } + plan := &jobPreQueue{ + blocksByLevel: make(map[uint32][]string), + } + m.preCompactionQueues[key] = plan + return plan +} + +func (m *metastoreState) findJob(name string) *compactionpb.CompactionJob { + m.compactionJobQueue.mu.Lock() + defer m.compactionJobQueue.mu.Unlock() + if jobEntry, exists := m.compactionJobQueue.jobs[name]; exists { + return jobEntry.CompactionJob + } + return nil +} + +func newMetastoreShard() *metastoreShard { + return &metastoreShard{ + segments: make(map[string]*metastorev1.BlockMeta), + } +} + +func (s *metastoreShard) putSegment(segment *metastorev1.BlockMeta) { + s.segmentsMutex.Lock() + s.segments[segment.Id] = segment + s.segmentsMutex.Unlock() +} + +func (s *metastoreShard) deleteSegment(segment *metastorev1.BlockMeta) { + s.segmentsMutex.Lock() + delete(s.segments, segment.Id) + s.segmentsMutex.Unlock() +} + +func (s *metastoreShard) loadSegments(b *bbolt.Bucket) error { + s.segmentsMutex.Lock() + defer s.segmentsMutex.Unlock() + c := b.Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + var md metastorev1.BlockMeta + if err := md.UnmarshalVT(v); err != nil { + return fmt.Errorf("failed to block %q: %w", string(k), err) + } + s.segments[md.Id] = &md + } + return nil +} + +func (m *metastoreState) loadCompactionPlan(b *bbolt.Bucket, preQueue *jobPreQueue) error { + preQueue.mu.Lock() + defer preQueue.mu.Unlock() + + c := b.Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + if strings.HasPrefix(string(k), compactionBucketJobPreQueuePrefix) { + var storedPreQueue compactionpb.JobPreQueue + if err := storedPreQueue.UnmarshalVT(v); err != nil { + return fmt.Errorf("failed to load job pre queue %q: %w", string(k), err) + } + preQueue.blocksByLevel[storedPreQueue.CompactionLevel] = storedPreQueue.Blocks + level.Debug(m.logger).Log( + "msg", "restored pre queue", + "shard", storedPreQueue.Shard, + "compaction_level", storedPreQueue.CompactionLevel, + "block_count", len(storedPreQueue.Blocks), + "blocks", storedPreQueue.Blocks) + } else { + var job compactionpb.CompactionJob + if err := job.UnmarshalVT(v); err != nil { + return fmt.Errorf("failed to unmarshal job %q: %w", string(k), err) + } + m.compactionJobQueue.enqueue(&job) + level.Debug(m.logger).Log( + "msg", "restored job into queue", + "shard", job.Shard, + "tenant", job.TenantId, + "compaction_level", job.CompactionLevel, + "job_status", 
job.Status.String(), + "raft_log_index", job.RaftLogIndex, + "lease_expires_at", job.LeaseExpiresAt, + "block_count", len(job.Blocks), + "blocks", job.Blocks) + } + } + return nil +} diff --git a/pkg/experiment/metastore/metastore_state_add_block.go b/pkg/experiment/metastore/metastore_state_add_block.go new file mode 100644 index 0000000000..9a4052ee51 --- /dev/null +++ b/pkg/experiment/metastore/metastore_state_add_block.go @@ -0,0 +1,92 @@ +package metastore + +import ( + "context" + "errors" + "fmt" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/go-kit/log/level" + + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + "github.com/grafana/pyroscope/pkg/experiment/metastore/compactionpb" + + "github.com/hashicorp/raft" + "go.etcd.io/bbolt" +) + +func (m *Metastore) AddBlock(_ context.Context, req *metastorev1.AddBlockRequest) (*metastorev1.AddBlockResponse, error) { + _ = level.Info(m.logger).Log( + "msg", "adding block", + "block_id", req.Block.Id, + "shard", req.Block.Shard, + "raft_commit_index", m.raft.CommitIndex(), + "raft_last_index", m.raft.LastIndex(), + "raft_applied_index", m.raft.AppliedIndex()) + t1 := time.Now() + defer func() { + m.metrics.raftAddBlockDuration.Observe(time.Since(t1).Seconds()) + level.Debug(m.logger).Log("msg", "add block duration", "block_id", req.Block.Id, "shard", req.Block.Shard, "duration", time.Since(t1)) + }() + _, resp, err := applyCommand[*metastorev1.AddBlockRequest, *metastorev1.AddBlockResponse](m.raft, req, m.config.Raft.ApplyTimeout) + if err != nil { + _ = level.Error(m.logger).Log("msg", "failed to apply add block", "block_id", req.Block.Id, "shard", req.Block.Shard, "err", err) + if m.shouldRetryAddBlock(err) { + return resp, status.Error(codes.Unavailable, err.Error()) + } + } + return resp, err +} + +func (m *Metastore) shouldRetryAddBlock(err error) bool { + return errors.Is(err, raft.ErrLeadershipLost) || + errors.Is(err, raft.ErrNotLeader) || + errors.Is(err, raft.ErrLeadershipTransferInProgress) || + errors.Is(err, raft.ErrRaftShutdown) +} + +func (m *metastoreState) applyAddBlock(log *raft.Log, request *metastorev1.AddBlockRequest) (*metastorev1.AddBlockResponse, error) { + name, key := keyForBlockMeta(request.Block.Shard, "", request.Block.Id) + value, err := request.Block.MarshalVT() + if err != nil { + return nil, err + } + + var jobToAdd *compactionpb.CompactionJob + var blockToAddToQueue *metastorev1.BlockMeta + + err = m.db.boltdb.Update(func(tx *bbolt.Tx) error { + err := updateBlockMetadataBucket(tx, name, func(bucket *bbolt.Bucket) error { + return bucket.Put(key, value) + }) + if err != nil { + return err + } + err, jobToAdd, blockToAddToQueue = m.consumeBlock(request.Block, tx, log.Index) + return nil + }) + if err != nil { + _ = level.Error(m.logger).Log( + "msg", "failed to add block", + "block", request.Block.Id, + "err", err, + ) + return nil, err + } + m.getOrCreateShard(request.Block.Shard).putSegment(request.Block) + if jobToAdd != nil { + m.addCompactionJob(jobToAdd) + m.compactionMetrics.addedBlocks.WithLabelValues( + fmt.Sprint(jobToAdd.Shard), jobToAdd.TenantId, fmt.Sprint(jobToAdd.CompactionLevel)).Inc() + m.compactionMetrics.addedJobs.WithLabelValues( + fmt.Sprint(jobToAdd.Shard), jobToAdd.TenantId, fmt.Sprint(jobToAdd.CompactionLevel)).Inc() + } else if blockToAddToQueue != nil { + m.addBlockToCompactionJobQueue(blockToAddToQueue) + m.compactionMetrics.addedBlocks.WithLabelValues( + fmt.Sprint(blockToAddToQueue.Shard), 
blockToAddToQueue.TenantId, fmt.Sprint(blockToAddToQueue.CompactionLevel)).Inc() + } + return &metastorev1.AddBlockResponse{}, nil +} diff --git a/pkg/experiment/metastore/metastore_state_poll_compaction_jobs.go b/pkg/experiment/metastore/metastore_state_poll_compaction_jobs.go new file mode 100644 index 0000000000..b0802bc2a5 --- /dev/null +++ b/pkg/experiment/metastore/metastore_state_poll_compaction_jobs.go @@ -0,0 +1,342 @@ +package metastore + +import ( + "context" + "fmt" + "math" + + "github.com/go-kit/log/level" + "github.com/hashicorp/raft" + "github.com/pkg/errors" + "go.etcd.io/bbolt" + + compactorv1 "github.com/grafana/pyroscope/api/gen/proto/go/compactor/v1" + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + "github.com/grafana/pyroscope/pkg/experiment/metastore/compactionpb" +) + +func (m *Metastore) PollCompactionJobs(_ context.Context, req *compactorv1.PollCompactionJobsRequest) (*compactorv1.PollCompactionJobsResponse, error) { + level.Debug(m.logger).Log( + "msg", "received poll compaction jobs request", + "num_updates", len(req.JobStatusUpdates), + "job_capacity", req.JobCapacity, + "raft_commit_index", m.raft.CommitIndex(), + "raft_last_index", m.raft.LastIndex(), + "raft_applied_index", m.raft.AppliedIndex()) + _, resp, err := applyCommand[*compactorv1.PollCompactionJobsRequest, *compactorv1.PollCompactionJobsResponse](m.raft, req, m.config.Raft.ApplyTimeout) + return resp, err +} + +type jobResult struct { + newBlocks []*metastorev1.BlockMeta + deletedBlocks []*metastorev1.BlockMeta + newJobs []*compactionpb.CompactionJob + newQueuedBlocks []*metastorev1.BlockMeta + deletedJobs []*compactionpb.CompactionJob + + newJobAssignments []*compactionpb.CompactionJob +} + +func (m *metastoreState) applyPollCompactionJobs(raft *raft.Log, request *compactorv1.PollCompactionJobsRequest) (resp *compactorv1.PollCompactionJobsResponse, err error) { + resp = &compactorv1.PollCompactionJobsResponse{} + level.Debug(m.logger).Log( + "msg", "applying poll compaction jobs", + "num_updates", len(request.JobStatusUpdates), + "job_capacity", request.JobCapacity, + "raft_log_index", raft.Index) + + jResult := &jobResult{ + newBlocks: make([]*metastorev1.BlockMeta, 0), + deletedBlocks: make([]*metastorev1.BlockMeta, 0), + newJobs: make([]*compactionpb.CompactionJob, 0), + newQueuedBlocks: make([]*metastorev1.BlockMeta, 0), + deletedJobs: make([]*compactionpb.CompactionJob, 0), + newJobAssignments: make([]*compactionpb.CompactionJob, 0), + } + + err = m.db.boltdb.Update(func(tx *bbolt.Tx) error { + for _, statusUpdate := range request.JobStatusUpdates { + job := m.findJob(statusUpdate.JobName) + if job == nil { + level.Error(m.logger).Log("msg", "error processing update for compaction job, job not found", "job", statusUpdate.JobName, "err", err) + continue + } + + level.Debug(m.logger).Log("msg", "processing status update for compaction job", "job", statusUpdate.JobName, "status", statusUpdate.Status) + name, _ := keyForCompactionJob(statusUpdate.Shard, statusUpdate.TenantId, statusUpdate.JobName) + err := updateCompactionJobBucket(tx, name, func(bucket *bbolt.Bucket) error { + switch statusUpdate.Status { // TODO: handle other cases + case compactorv1.CompactionStatus_COMPACTION_STATUS_SUCCESS: + err := m.processCompletedJob(tx, job, statusUpdate, jResult, raft.Index) + if err != nil { + level.Error(m.logger).Log("msg", "failed to update completed job", "job", job.Name, "err", err) + return errors.Wrap(err, "failed to update completed job") + } + case 
compactorv1.CompactionStatus_COMPACTION_STATUS_IN_PROGRESS: + if m.compactionJobQueue.isOwner(statusUpdate.JobName, statusUpdate.RaftLogIndex) { + err := m.persistJobDeadline(tx, job, m.compactionJobQueue.getNewDeadline(raft.AppendedAt.UnixNano())) + if err != nil { + return errors.Wrap(err, "failed to update compaction job deadline") + } + m.compactionJobQueue.update(statusUpdate.JobName, raft.AppendedAt.UnixNano(), statusUpdate.RaftLogIndex) + } else { + level.Warn(m.logger).Log("msg", "compaction job status update rejected", "job", job.Name, "raft_log_index", statusUpdate.RaftLogIndex) + return errors.New("compaction job status update rejected") + } + } + return nil + }) + if err != nil { + level.Error(m.logger).Log("msg", "error processing update for compaction job", "job", job.Name, "err", err) + continue + } + } + + if request.JobCapacity > 0 { + jResult.newJobAssignments, err = m.assignNewJobs(tx, int(request.JobCapacity), raft.Index, raft.AppendedAt.UnixNano()) + if err != nil { + return err + } + } + + return nil + }) + if err != nil { + return nil, err + } + + // now update the state + for _, b := range jResult.newBlocks { + m.getOrCreateShard(b.Shard).putSegment(b) + m.compactionMetrics.addedBlocks.WithLabelValues(fmt.Sprint(b.Shard), b.TenantId, fmt.Sprint(b.CompactionLevel)).Inc() + } + + for _, b := range jResult.deletedBlocks { + m.getOrCreateShard(b.Shard).deleteSegment(b) + m.compactionMetrics.deletedBlocks.WithLabelValues(fmt.Sprint(b.Shard), b.TenantId, fmt.Sprint(b.CompactionLevel)).Inc() + } + + for _, j := range jResult.newJobs { + m.addCompactionJob(j) + m.compactionMetrics.addedJobs.WithLabelValues(fmt.Sprint(j.Shard), j.TenantId, fmt.Sprint(j.CompactionLevel)).Inc() + } + + for _, b := range jResult.newQueuedBlocks { + m.addBlockToCompactionJobQueue(b) + // already counted above + } + + for _, j := range jResult.deletedJobs { + m.compactionJobQueue.evict(j.Name, j.RaftLogIndex) + m.compactionMetrics.completedJobs.WithLabelValues(fmt.Sprint(j.Shard), j.TenantId, fmt.Sprint(j.CompactionLevel)).Inc() + } + + resp.CompactionJobs, err = m.convertJobs(jResult.newJobAssignments) + for _, j := range resp.CompactionJobs { + m.compactionMetrics.assignedJobs.WithLabelValues(fmt.Sprint(j.Shard), j.TenantId, fmt.Sprint(j.CompactionLevel)).Inc() + } + + return resp, err +} + +func (m *metastoreState) convertJobs(jobs []*compactionpb.CompactionJob) ([]*compactorv1.CompactionJob, error) { + res := make([]*compactorv1.CompactionJob, 0, len(jobs)) + for _, job := range jobs { + // populate block metadata (workers rely on it) + blocks := make([]*metastorev1.BlockMeta, 0, len(job.Blocks)) + for _, bId := range job.Blocks { + b := m.findBlock(job.Shard, bId) + if b == nil { + level.Error(m.logger).Log( + "msg", "failed to populate job details, block not found", + "block", bId, + "shard", job.Shard, + "job", job.Name) + continue + } + blocks = append(blocks, b) + } + if len(blocks) == 0 { + evicted := m.compactionJobQueue.evict(job.Name, math.MaxInt64) + level.Warn(m.logger).Log("msg", "skipping assigned compaction job since it has no valid blocks", "job", job.Name, "evicted", evicted) + continue + } + + res = append(res, &compactorv1.CompactionJob{ + Name: job.Name, + Blocks: blocks, + Status: &compactorv1.CompactionJobStatus{ + JobName: job.Name, + Status: compactorv1.CompactionStatus(job.Status), + RaftLogIndex: job.RaftLogIndex, + Shard: job.Shard, + TenantId: job.TenantId, + }, + CompactionLevel: job.CompactionLevel, + RaftLogIndex: job.RaftLogIndex, + Shard: job.Shard, + TenantId: 
job.TenantId, + }) + } + return res, nil +} + +func (m *metastoreState) processCompletedJob( + tx *bbolt.Tx, + job *compactionpb.CompactionJob, + update *compactorv1.CompactionJobStatus, + jResult *jobResult, + raftLogIndex uint64, +) error { + ownsJob := m.compactionJobQueue.isOwner(job.Name, update.RaftLogIndex) + if !ownsJob { + return errors.New(fmt.Sprintf("deadline exceeded for job with id %s", job.Name)) + } + jBucket, jKey := keyForCompactionJob(job.Shard, job.TenantId, job.Name) + err := updateCompactionJobBucket(tx, jBucket, func(bucket *bbolt.Bucket) error { + return bucket.Delete(jKey) + }) + if err != nil { + return err + } + jResult.deletedJobs = append(jResult.deletedJobs, job) + for _, b := range update.CompletedJob.Blocks { + bName, bKey := keyForBlockMeta(b.Shard, b.TenantId, b.Id) + err = updateBlockMetadataBucket(tx, bName, func(bucket *bbolt.Bucket) error { + bValue, _ := b.MarshalVT() + return bucket.Put(bKey, bValue) + }) + if err != nil { + _ = level.Error(m.logger).Log( + "msg", "failed to add block", + "block", b.Id, + "err", err, + ) + return err + } + jResult.newBlocks = append(jResult.newBlocks, b) + + // create and store an optional compaction job + err, jobToAdd, blockForQueue := m.consumeBlock(b, tx, raftLogIndex) + if err != nil { + return err + } + if jobToAdd != nil { + jResult.newJobs = append(jResult.newJobs, jobToAdd) + } else if blockForQueue != nil { + jResult.newQueuedBlocks = append(jResult.newQueuedBlocks, blockForQueue) + } + } + + // delete source blocks + bName, _ := keyForBlockMeta(job.Shard, job.TenantId, "") + err = updateBlockMetadataBucket(tx, bName, func(bucket *bbolt.Bucket) error { + for _, bId := range job.Blocks { + level.Debug(m.logger).Log("msg", "deleting block from storage", "block", bId, "compaction_job", job.Name) + b := m.findBlock(job.Shard, bId) + if b == nil { + level.Error(m.logger).Log("msg", "failed to delete block from storage, block not found", "block", bId, "shard", job.Shard) + return errors.Wrapf(err, "failed to find compaction job source block %s for deletion", bId) + } + + _, bKey := keyForBlockMeta(b.Shard, b.TenantId, b.Id) + err := bucket.Delete(bKey) + if err != nil { + return errors.Wrapf(err, "failed to delete compaction job source block %s", b.Id) + } + jResult.deletedBlocks = append(jResult.deletedBlocks, b) + } + return nil + }) + if err != nil { + return err + } + job.RaftLogIndex = update.RaftLogIndex + return nil +} + +func (m *metastoreState) findBlock(shard uint32, blockId string) *metastorev1.BlockMeta { + segmentShard := m.getOrCreateShard(shard) + segmentShard.segmentsMutex.Lock() + defer segmentShard.segmentsMutex.Unlock() + + return segmentShard.segments[blockId] +} + +func (m *metastoreState) persistAssignedJob(tx *bbolt.Tx, job *compactionpb.CompactionJob) error { + return m.persistJob(tx, job, func(storedJob *compactionpb.CompactionJob) { + storedJob.Status = job.Status + storedJob.LeaseExpiresAt = job.LeaseExpiresAt + storedJob.RaftLogIndex = job.RaftLogIndex + }) +} + +func (m *metastoreState) persistJobDeadline(tx *bbolt.Tx, job *compactionpb.CompactionJob, leaseExpiresAt int64) error { + return m.persistJob(tx, job, func(storedJob *compactionpb.CompactionJob) { + storedJob.LeaseExpiresAt = leaseExpiresAt + }) +} + +func (m *metastoreState) persistJob(tx *bbolt.Tx, job *compactionpb.CompactionJob, fn func(compactionJob *compactionpb.CompactionJob)) error { + jobBucketName, jobKey := keyForCompactionJob(job.Shard, job.TenantId, job.Name) + err := updateCompactionJobBucket(tx, jobBucketName, 
func(bucket *bbolt.Bucket) error { + storedJobData := bucket.Get(jobKey) + if storedJobData == nil { + return errors.New("compaction job not found in storage") + } + var storedJob compactionpb.CompactionJob + err := storedJob.UnmarshalVT(storedJobData) + if err != nil { + return errors.Wrap(err, "failed to unmarshal compaction job data") + } + fn(&storedJob) + jobData, _ := storedJob.MarshalVT() + return bucket.Put(jobKey, jobData) + }) + return err +} + +func (m *metastoreState) assignNewJobs(tx *bbolt.Tx, jobCapacity int, raftLogIndex uint64, now int64) ([]*compactionpb.CompactionJob, error) { + jobsToAssign := m.findJobsToAssign(jobCapacity, raftLogIndex, now) + level.Debug(m.logger).Log("msg", "compaction jobs to assign", "jobs", len(jobsToAssign), "raft_log_index", raftLogIndex, "capacity", jobCapacity) + + for _, job := range jobsToAssign { + // mark job "in progress" + err := m.persistAssignedJob(tx, job) + if err != nil { + level.Error(m.logger).Log("msg", "failed to update job status", "job", job.Name, "err", err) + // return the job back to the queue + m.compactionJobQueue.enqueue(job) + return nil, errors.Wrap(err, "failed to update job status") + } + } + + return jobsToAssign, nil +} + +func (m *metastoreState) findJobsToAssign(jobCapacity int, raftLogIndex uint64, now int64) []*compactionpb.CompactionJob { + jobsToAssign := make([]*compactionpb.CompactionJob, 0, jobCapacity) + jobCount, newJobs, inProgressJobs, completedJobs, failedJobs := m.compactionJobQueue.stats() + level.Debug(m.logger).Log( + "msg", "looking for jobs to assign", + "job_capacity", jobCapacity, + "raft_log_index", raftLogIndex, + "job_queue_size", jobCount, + "new_jobs_in_queue", newJobs, + "in_progress_jobs_in_queue", inProgressJobs, + "completed_jobs_in_queue", completedJobs, + "failed_jobs_in_queue", failedJobs, + ) + + var j *compactionpb.CompactionJob + for len(jobsToAssign) < jobCapacity { + j = m.compactionJobQueue.dequeue(now, raftLogIndex) + if j == nil { + break + } + level.Debug(m.logger).Log("msg", "assigning job to raftLogIndex", "job", j, "raft_log_index", raftLogIndex) + jobsToAssign = append(jobsToAssign, j) + } + + return jobsToAssign +} diff --git a/pkg/experiment/metastore/raftleader/raftleader.go b/pkg/experiment/metastore/raftleader/raftleader.go new file mode 100644 index 0000000000..c00a4f251f --- /dev/null +++ b/pkg/experiment/metastore/raftleader/raftleader.go @@ -0,0 +1,133 @@ +package raftleader + +import ( + "sync" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/hashicorp/raft" + "google.golang.org/grpc/health/grpc_health_v1" + + "github.com/grafana/pyroscope/pkg/util/health" +) + +type HealthObserver struct { + server health.Service + logger log.Logger + mu sync.Mutex + registered map[serviceKey]*raftService + metrics *Metrics +} +type Metrics struct { + status prometheus.Gauge +} + +func NewMetrics(reg prometheus.Registerer) *Metrics { + m := &Metrics{ + status: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "pyroscope", + Name: "metastore_raft_status", + }), + } + if reg != nil { + reg.MustRegister(m.status) + } + return m +} + +func NewRaftLeaderHealthObserver(hs health.Service, logger log.Logger, m *Metrics) *HealthObserver { + return &HealthObserver{ + server: hs, + logger: logger, + metrics: m, + registered: make(map[serviceKey]*raftService), + } +} + +func (hs *HealthObserver) Register(r *raft.Raft, service string) { + hs.mu.Lock() + defer hs.mu.Unlock() + k := serviceKey{raft: r, 
service: service} + if _, ok := hs.registered[k]; ok { + return + } + svc := &raftService{ + server: hs.server, + hs: hs, + logger: log.With(hs.logger, "service", service), + service: service, + raft: r, + c: make(chan raft.Observation, 1), + stop: make(chan struct{}), + done: make(chan struct{}), + } + _ = level.Debug(svc.logger).Log("msg", "registering health check") + svc.updateStatus() + go svc.run() + svc.observer = raft.NewObserver(svc.c, true, func(o *raft.Observation) bool { + _, ok := o.Data.(raft.LeaderObservation) + return ok + }) + r.RegisterObserver(svc.observer) + hs.registered[k] = svc +} + +func (hs *HealthObserver) Deregister(r *raft.Raft, service string) { + hs.mu.Lock() + k := serviceKey{raft: r, service: service} + svc, ok := hs.registered[k] + delete(hs.registered, k) + hs.mu.Unlock() + if ok { + close(svc.stop) + <-svc.done + } +} + +type serviceKey struct { + raft *raft.Raft + service string +} + +type raftService struct { + server health.Service + hs *HealthObserver + logger log.Logger + service string + raft *raft.Raft + observer *raft.Observer + c chan raft.Observation + stop chan struct{} + done chan struct{} +} + +func (svc *raftService) run() { + defer func() { + close(svc.done) + }() + for { + select { + case <-svc.c: + svc.updateStatus() + case <-svc.stop: + _ = level.Debug(svc.logger).Log("msg", "deregistering health check") + // We explicitly remove the service from serving when we stop observing it. + svc.server.SetServingStatus(svc.service, grpc_health_v1.HealthCheckResponse_NOT_SERVING) + svc.raft.DeregisterObserver(svc.observer) + return + } + } +} + +func (svc *raftService) updateStatus() { + status := grpc_health_v1.HealthCheckResponse_NOT_SERVING + if svc.raft.State() == raft.Leader { + status = grpc_health_v1.HealthCheckResponse_SERVING + } + svc.hs.metrics.status.Set(float64(svc.raft.State())) + + _ = level.Info(svc.logger).Log("msg", "updating health status", "status", status) + svc.server.SetServingStatus(svc.service, status) +} diff --git a/pkg/experiment/metastore/raftlogpb/raflog.pb.go b/pkg/experiment/metastore/raftlogpb/raflog.pb.go new file mode 100644 index 0000000000..19014faa75 --- /dev/null +++ b/pkg/experiment/metastore/raftlogpb/raflog.pb.go @@ -0,0 +1,292 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc (unknown) +// source: experiment/metastore/raftlogpb/raflog.proto + +package raftlogpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CommandType int32 + +const ( + CommandType_COMMAND_TYPE_UNKNOWN CommandType = 0 + CommandType_COMMAND_TYPE_ADD_BLOCK CommandType = 1 + CommandType_COMMAND_TYPE_POLL_COMPACTION_JOBS_STATUS CommandType = 2 + // This is a temporary solution. + CommandType_COMMAND_TYPE_TRUNCATE CommandType = 4196 +) + +// Enum value maps for CommandType. 
+var ( + CommandType_name = map[int32]string{ + 0: "COMMAND_TYPE_UNKNOWN", + 1: "COMMAND_TYPE_ADD_BLOCK", + 2: "COMMAND_TYPE_POLL_COMPACTION_JOBS_STATUS", + 4196: "COMMAND_TYPE_TRUNCATE", + } + CommandType_value = map[string]int32{ + "COMMAND_TYPE_UNKNOWN": 0, + "COMMAND_TYPE_ADD_BLOCK": 1, + "COMMAND_TYPE_POLL_COMPACTION_JOBS_STATUS": 2, + "COMMAND_TYPE_TRUNCATE": 4196, + } +) + +func (x CommandType) Enum() *CommandType { + p := new(CommandType) + *p = x + return p +} + +func (x CommandType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CommandType) Descriptor() protoreflect.EnumDescriptor { + return file_experiment_metastore_raftlogpb_raflog_proto_enumTypes[0].Descriptor() +} + +func (CommandType) Type() protoreflect.EnumType { + return &file_experiment_metastore_raftlogpb_raflog_proto_enumTypes[0] +} + +func (x CommandType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CommandType.Descriptor instead. +func (CommandType) EnumDescriptor() ([]byte, []int) { + return file_experiment_metastore_raftlogpb_raflog_proto_rawDescGZIP(), []int{0} +} + +type RaftLogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type CommandType `protobuf:"varint,1,opt,name=type,proto3,enum=raft_log.CommandType" json:"type,omitempty"` + Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` +} + +func (x *RaftLogEntry) Reset() { + *x = RaftLogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_experiment_metastore_raftlogpb_raflog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RaftLogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RaftLogEntry) ProtoMessage() {} + +func (x *RaftLogEntry) ProtoReflect() protoreflect.Message { + mi := &file_experiment_metastore_raftlogpb_raflog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RaftLogEntry.ProtoReflect.Descriptor instead. 
+func (*RaftLogEntry) Descriptor() ([]byte, []int) { + return file_experiment_metastore_raftlogpb_raflog_proto_rawDescGZIP(), []int{0} +} + +func (x *RaftLogEntry) GetType() CommandType { + if x != nil { + return x.Type + } + return CommandType_COMMAND_TYPE_UNKNOWN +} + +func (x *RaftLogEntry) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +type TruncateCommand struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Timestamp uint64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (x *TruncateCommand) Reset() { + *x = TruncateCommand{} + if protoimpl.UnsafeEnabled { + mi := &file_experiment_metastore_raftlogpb_raflog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TruncateCommand) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TruncateCommand) ProtoMessage() {} + +func (x *TruncateCommand) ProtoReflect() protoreflect.Message { + mi := &file_experiment_metastore_raftlogpb_raflog_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TruncateCommand.ProtoReflect.Descriptor instead. +func (*TruncateCommand) Descriptor() ([]byte, []int) { + return file_experiment_metastore_raftlogpb_raflog_proto_rawDescGZIP(), []int{1} +} + +func (x *TruncateCommand) GetTimestamp() uint64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +var File_experiment_metastore_raftlogpb_raflog_proto protoreflect.FileDescriptor + +var file_experiment_metastore_raftlogpb_raflog_proto_rawDesc = []byte{ + 0x0a, 0x2b, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, + 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x61, 0x66, 0x74, 0x6c, 0x6f, 0x67, 0x70, 0x62, + 0x2f, 0x72, 0x61, 0x66, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x72, + 0x61, 0x66, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x22, 0x53, 0x0a, 0x0c, 0x52, 0x61, 0x66, 0x74, 0x4c, + 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x29, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6c, 0x6f, 0x67, + 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x2f, 0x0a, 0x0f, + 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, + 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2a, 0x8d, 0x01, + 0x0a, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, + 0x14, 0x43, 0x4f, 0x4d, 0x4d, 0x41, 0x4e, 0x44, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4d, 0x4d, 0x41, + 0x4e, 0x44, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x44, 0x44, 0x5f, 0x42, 0x4c, 0x4f, 0x43, + 0x4b, 0x10, 0x01, 0x12, 0x2c, 0x0a, 0x28, 0x43, 0x4f, 0x4d, 0x4d, 0x41, 0x4e, 0x44, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x50, 0x4f, 0x4c, 0x4c, 0x5f, 0x43, 0x4f, 
0x4d, 0x50, 0x41, 0x43, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x4a, 0x4f, 0x42, 0x53, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x10, + 0x02, 0x12, 0x1a, 0x0a, 0x15, 0x43, 0x4f, 0x4d, 0x4d, 0x41, 0x4e, 0x44, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x54, 0x52, 0x55, 0x4e, 0x43, 0x41, 0x54, 0x45, 0x10, 0xe4, 0x20, 0x42, 0x98, 0x01, + 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x42, 0x0b, + 0x52, 0x61, 0x66, 0x6c, 0x6f, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x61, 0x66, 0x61, 0x6e, + 0x61, 0x2f, 0x70, 0x79, 0x72, 0x6f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x61, 0x66, 0x74, 0x6c, 0x6f, 0x67, 0x70, 0x62, 0xa2, 0x02, + 0x03, 0x52, 0x58, 0x58, 0xaa, 0x02, 0x07, 0x52, 0x61, 0x66, 0x74, 0x4c, 0x6f, 0x67, 0xca, 0x02, + 0x07, 0x52, 0x61, 0x66, 0x74, 0x4c, 0x6f, 0x67, 0xe2, 0x02, 0x13, 0x52, 0x61, 0x66, 0x74, 0x4c, + 0x6f, 0x67, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, + 0x07, 0x52, 0x61, 0x66, 0x74, 0x4c, 0x6f, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_experiment_metastore_raftlogpb_raflog_proto_rawDescOnce sync.Once + file_experiment_metastore_raftlogpb_raflog_proto_rawDescData = file_experiment_metastore_raftlogpb_raflog_proto_rawDesc +) + +func file_experiment_metastore_raftlogpb_raflog_proto_rawDescGZIP() []byte { + file_experiment_metastore_raftlogpb_raflog_proto_rawDescOnce.Do(func() { + file_experiment_metastore_raftlogpb_raflog_proto_rawDescData = protoimpl.X.CompressGZIP(file_experiment_metastore_raftlogpb_raflog_proto_rawDescData) + }) + return file_experiment_metastore_raftlogpb_raflog_proto_rawDescData +} + +var file_experiment_metastore_raftlogpb_raflog_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_experiment_metastore_raftlogpb_raflog_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_experiment_metastore_raftlogpb_raflog_proto_goTypes = []any{ + (CommandType)(0), // 0: raft_log.CommandType + (*RaftLogEntry)(nil), // 1: raft_log.RaftLogEntry + (*TruncateCommand)(nil), // 2: raft_log.TruncateCommand +} +var file_experiment_metastore_raftlogpb_raflog_proto_depIdxs = []int32{ + 0, // 0: raft_log.RaftLogEntry.type:type_name -> raft_log.CommandType + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_experiment_metastore_raftlogpb_raflog_proto_init() } +func file_experiment_metastore_raftlogpb_raflog_proto_init() { + if File_experiment_metastore_raftlogpb_raflog_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_experiment_metastore_raftlogpb_raflog_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*RaftLogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_experiment_metastore_raftlogpb_raflog_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*TruncateCommand); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := 
protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_experiment_metastore_raftlogpb_raflog_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_experiment_metastore_raftlogpb_raflog_proto_goTypes, + DependencyIndexes: file_experiment_metastore_raftlogpb_raflog_proto_depIdxs, + EnumInfos: file_experiment_metastore_raftlogpb_raflog_proto_enumTypes, + MessageInfos: file_experiment_metastore_raftlogpb_raflog_proto_msgTypes, + }.Build() + File_experiment_metastore_raftlogpb_raflog_proto = out.File + file_experiment_metastore_raftlogpb_raflog_proto_rawDesc = nil + file_experiment_metastore_raftlogpb_raflog_proto_goTypes = nil + file_experiment_metastore_raftlogpb_raflog_proto_depIdxs = nil +} diff --git a/pkg/experiment/metastore/raftlogpb/raflog.proto b/pkg/experiment/metastore/raftlogpb/raflog.proto new file mode 100644 index 0000000000..fa41df2fd9 --- /dev/null +++ b/pkg/experiment/metastore/raftlogpb/raflog.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package raft_log; + +message RaftLogEntry { + CommandType type = 1; + bytes payload = 2; +} + +enum CommandType { + COMMAND_TYPE_UNKNOWN = 0; + COMMAND_TYPE_ADD_BLOCK = 1; + COMMAND_TYPE_POLL_COMPACTION_JOBS_STATUS = 2; + + // This is a temporary solution. + COMMAND_TYPE_TRUNCATE = 4196; +} + +message TruncateCommand { + uint64 timestamp = 1; +} diff --git a/pkg/experiment/metastore/raftlogpb/raflog_vtproto.pb.go b/pkg/experiment/metastore/raftlogpb/raflog_vtproto.pb.go new file mode 100644 index 0000000000..283c760666 --- /dev/null +++ b/pkg/experiment/metastore/raftlogpb/raflog_vtproto.pb.go @@ -0,0 +1,307 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.6.0 +// source: experiment/metastore/raftlogpb/raflog.proto + +package raftlogpb + +import ( + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *RaftLogEntry) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RaftLogEntry) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RaftLogEntry) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Payload) > 0 { + i -= len(m.Payload) + copy(dAtA[i:], m.Payload) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Payload))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TruncateCommand) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TruncateCommand) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *TruncateCommand) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Timestamp != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RaftLogEntry) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Type)) + } + l = len(m.Payload) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *TruncateCommand) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Timestamp != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Timestamp)) + } + n += len(m.unknownFields) + return n +} + +func (m *RaftLogEntry) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RaftLogEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RaftLogEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= CommandType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var byteLen 
int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = append(m.Payload[:0], dAtA[iNdEx:postIndex]...) + if m.Payload == nil { + m.Payload = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TruncateCommand) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TruncateCommand: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TruncateCommand: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/pkg/experiment/querybackend/backend.go b/pkg/experiment/querybackend/backend.go new file mode 100644 index 0000000000..40ec1dd132 --- /dev/null +++ b/pkg/experiment/querybackend/backend.go @@ -0,0 +1,146 @@ +package querybackend + +import ( + "context" + "flag" + "fmt" + + "github.com/go-kit/log" + "github.com/grafana/dskit/grpcclient" + "github.com/grafana/dskit/services" + "github.com/opentracing/opentracing-go" + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/atomic" + "golang.org/x/sync/errgroup" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" + "github.com/grafana/pyroscope/pkg/experiment/querybackend/queryplan" + "github.com/grafana/pyroscope/pkg/iter" + "github.com/grafana/pyroscope/pkg/util" +) + +const defaultConcurrencyLimit = 25 + +type Config struct { + Address string `yaml:"address"` + GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config" doc:"description=Configures the gRPC client used to communicate between the query-frontends and the query-schedulers."` +} + +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + f.StringVar(&cfg.Address, "query-backend.address", "localhost:9095", "") + cfg.GRPCClientConfig.RegisterFlagsWithPrefix("query-backend.grpc-client-config", f) +} + +func (cfg *Config) Validate() error { + if cfg.Address == "" { + return fmt.Errorf("query-backend.address is required") + } + return cfg.GRPCClientConfig.Validate() +} + +type QueryHandler interface { + Invoke(context.Context, *querybackendv1.InvokeRequest) (*querybackendv1.InvokeResponse, error) +} + +type QueryBackend struct { + service services.Service + querybackendv1.QueryBackendServiceServer + + config Config + logger log.Logger + reg prometheus.Registerer + + backendClient QueryHandler + blockReader QueryHandler + + concurrency uint32 + running atomic.Uint32 +} + +func New( + config Config, + logger log.Logger, + reg prometheus.Registerer, + backendClient QueryHandler, + blockReader QueryHandler, +) (*QueryBackend, error) { + q := QueryBackend{ + config: config, + logger: logger, + reg: reg, + backendClient: backendClient, + blockReader: blockReader, + + concurrency: defaultConcurrencyLimit, + } + q.service = services.NewIdleService(q.starting, q.stopping) + return &q, nil +} + +func (q *QueryBackend) Service() services.Service { return q.service } +func (q *QueryBackend) starting(context.Context) error { return nil } +func (q *QueryBackend) stopping(error) error { return nil } + +func (q *QueryBackend) Invoke( + ctx context.Context, + req *querybackendv1.InvokeRequest, +) (*querybackendv1.InvokeResponse, error) { + span, ctx := opentracing.StartSpanFromContext(ctx, "QueryBackend.Invoke") + defer span.Finish() + + p := queryplan.Open(req.QueryPlan) + switch r := p.Root(); r.Type { + case queryplan.NodeMerge: + return q.merge(ctx, req, r.Children()) + case queryplan.NodeRead: + return q.withThrottling(func() (*querybackendv1.InvokeResponse, error) { + return q.read(ctx, req, r.Blocks()) + }) + default: + panic("query plan: unknown node type") + } +} + +func (q *QueryBackend) merge( + ctx context.Context, + request *querybackendv1.InvokeRequest, + children iter.Iterator[*queryplan.Node], +) (*querybackendv1.InvokeResponse, error) { + request.QueryPlan = nil + m := newAggregator(request) + g, ctx 
:= errgroup.WithContext(ctx) + for children.Next() { + req := request.CloneVT() + req.QueryPlan = children.At().Plan().Proto() + g.Go(util.RecoverPanic(func() error { + // TODO: Speculative retry. + return m.aggregateResponse(q.backendClient.Invoke(ctx, req)) + })) + } + if err := g.Wait(); err != nil { + return nil, err + } + return m.response() +} + +func (q *QueryBackend) read( + ctx context.Context, + request *querybackendv1.InvokeRequest, + blocks iter.Iterator[*metastorev1.BlockMeta], +) (*querybackendv1.InvokeResponse, error) { + request.QueryPlan = &querybackendv1.QueryPlan{ + Blocks: iter.MustSlice(blocks), + } + return q.blockReader.Invoke(ctx, request) +} + +func (q *QueryBackend) withThrottling(fn func() (*querybackendv1.InvokeResponse, error)) (*querybackendv1.InvokeResponse, error) { + if q.running.Inc() > q.concurrency { + return nil, status.Error(codes.ResourceExhausted, "all minions are busy, please try later") + } + defer q.running.Dec() + return fn() +} diff --git a/pkg/experiment/querybackend/block/compaction.go b/pkg/experiment/querybackend/block/compaction.go new file mode 100644 index 0000000000..0c5eb7b043 --- /dev/null +++ b/pkg/experiment/querybackend/block/compaction.go @@ -0,0 +1,606 @@ +package block + +import ( + "context" + "crypto/rand" + "fmt" + "os" + "path/filepath" + "slices" + "sort" + "strings" + "sync" + "time" + + "github.com/grafana/dskit/multierror" + "github.com/oklog/ulid" + "github.com/parquet-go/parquet-go" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/storage" + "golang.org/x/sync/errgroup" + + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + phlaremodel "github.com/grafana/pyroscope/pkg/model" + "github.com/grafana/pyroscope/pkg/objstore" + "github.com/grafana/pyroscope/pkg/phlaredb/block" + schemav1 "github.com/grafana/pyroscope/pkg/phlaredb/schemas/v1" + "github.com/grafana/pyroscope/pkg/phlaredb/symdb" + "github.com/grafana/pyroscope/pkg/phlaredb/tsdb/index" + "github.com/grafana/pyroscope/pkg/util" +) + +var ( + ErrNoBlocksToMerge = fmt.Errorf("no blocks to merge") + ErrShardMergeMismatch = fmt.Errorf("only blocks from the same shard can be merged") +) + +type CompactionOption func(*compactionConfig) + +func WithCompactionObjectOptions(options ...ObjectOption) CompactionOption { + return func(p *compactionConfig) { + p.objectOptions = append(p.objectOptions, options...) + } +} + +func WithCompactionTempDir(tempdir string) CompactionOption { + return func(p *compactionConfig) { + p.tempdir = tempdir + } +} + +func WithCompactionDestination(storage objstore.Bucket) CompactionOption { + return func(p *compactionConfig) { + p.destination = storage + } +} + +type compactionConfig struct { + objectOptions []ObjectOption + tempdir string + source objstore.BucketReader + destination objstore.Bucket +} + +func Compact( + ctx context.Context, + blocks []*metastorev1.BlockMeta, + storage objstore.Bucket, + options ...CompactionOption, +) (m []*metastorev1.BlockMeta, err error) { + c := &compactionConfig{ + tempdir: os.TempDir(), + source: storage, + destination: storage, + } + for _, option := range options { + option(c) + } + + objects := ObjectsFromMetas(storage, blocks, c.objectOptions...) 
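+ // PlanCompaction groups the source objects by tenant: each tenant gets its
+ // own output block, one compaction level above the highest input level, with
+ // tenant services compacted in name order. For a given set of input blocks
+ // the resulting plan ordering is deterministic.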
+ plan, err := PlanCompaction(objects) + if err != nil { + return nil, err + } + + if err = objects.Open(ctx); err != nil { + return nil, err + } + defer func() { + err = multierror.New(err, objects.Close()).Err() + }() + + compacted := make([]*metastorev1.BlockMeta, 0, len(plan)) + for _, p := range plan { + md, compactionErr := p.Compact(ctx, c.destination, c.tempdir) + if compactionErr != nil { + return nil, compactionErr + } + compacted = append(compacted, md) + } + + return compacted, nil +} + +func PlanCompaction(objects Objects) ([]*CompactionPlan, error) { + if len(objects) == 0 { + // Even if there's just a single object, we still need to rewrite it. + return nil, ErrNoBlocksToMerge + } + + r := objects[0] + var c uint32 + for _, obj := range objects { + if r.meta.Shard != obj.meta.Shard { + return nil, ErrShardMergeMismatch + } + c = max(c, obj.meta.CompactionLevel) + } + c++ + + m := make(map[string]*CompactionPlan) + for _, obj := range objects { + for _, s := range obj.meta.TenantServices { + tm, ok := m[s.TenantId] + if !ok { + tm = newBlockCompaction(s.TenantId, r.meta.Shard, c) + m[s.TenantId] = tm + } + sm := tm.addTenantService(s) + // Bind objects to services. + sm.append(NewTenantService(s, obj)) + } + } + + ordered := make([]*CompactionPlan, 0, len(m)) + for _, tm := range m { + ordered = append(ordered, tm) + slices.SortFunc(tm.services, func(a, b *tenantServiceCompaction) int { + return strings.Compare(a.meta.Name, b.meta.Name) + }) + } + slices.SortFunc(ordered, func(a, b *CompactionPlan) int { + return strings.Compare(a.tenantID, b.tenantID) + }) + + return ordered, nil +} + +type CompactionPlan struct { + tenantID string + serviceMap map[string]*tenantServiceCompaction + services []*tenantServiceCompaction + meta *metastorev1.BlockMeta +} + +func newBlockCompaction(tenantID string, shard uint32, compactionLevel uint32) *CompactionPlan { + return &CompactionPlan{ + tenantID: tenantID, + serviceMap: make(map[string]*tenantServiceCompaction), + meta: &metastorev1.BlockMeta{ + FormatVersion: 1, + // TODO(kolesnikovae): Make it deterministic? + Id: ulid.MustNew(uint64(time.Now().UnixMilli()), rand.Reader).String(), + TenantId: tenantID, + Shard: shard, + CompactionLevel: compactionLevel, + TenantServices: nil, + MinTime: 0, + MaxTime: 0, + Size: 0, + }, + } +} + +func (b *CompactionPlan) Estimate() { + // TODO(kolesnikovae): Implement. +} + +func (b *CompactionPlan) Compact(ctx context.Context, dst objstore.Bucket, tmpdir string) (m *metastorev1.BlockMeta, err error) { + w := NewBlockWriter(dst, ObjectPath(b.meta), tmpdir) + defer func() { + err = multierror.New(err, w.Close()).Err() + }() + // Services are compacted in a strict order. + for _, s := range b.services { + s.estimate() + // TODO(kolesnikovae): Wait until the required resources are available? 
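// s.compact (below) opens the profiles, tsdb and symbols sections of every
// source object, merges the profile rows in (labels, timestamp) order,
// rewrites series and stacktrace references for the new block, and appends
// the resulting sections to the block writer.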
+ if err = s.compact(ctx, w); err != nil { + return nil, fmt.Errorf("compacting block: %w", err) + } + b.meta.TenantServices = append(b.meta.TenantServices, s.meta) + } + if err = w.Flush(ctx); err != nil { + return nil, fmt.Errorf("flushing block writer: %w", err) + } + b.meta.Size = w.Offset() + return b.meta, nil +} + +func (b *CompactionPlan) addTenantService(s *metastorev1.TenantService) *tenantServiceCompaction { + sm, ok := b.serviceMap[s.Name] + if !ok { + sm = newTenantServiceCompaction(s.TenantId, s.Name) + b.serviceMap[s.Name] = sm + b.services = append(b.services, sm) + } + if b.meta.MinTime == 0 || s.MinTime < b.meta.MinTime { + b.meta.MinTime = s.MinTime + } + if s.MaxTime > b.meta.MaxTime { + b.meta.MaxTime = s.MaxTime + } + return sm +} + +type compactionEstimates struct { + inMemorySizeInputSymbols int64 + inMemorySizeInputIndex int64 + inMemorySizeInputProfiles int64 + + inMemorySizeOutputSymbols int64 + inMemorySizeOutputIndex int64 + inMemorySizeOutputProfiles int64 + + outputSizeIndex int64 + outputSizeSymbols int64 + outputSizeProfiles int64 +} + +func (m *compactionEstimates) inMemorySizeTotal() int64 { + return m.inMemorySizeInputSymbols + + m.inMemorySizeInputIndex + + m.inMemorySizeInputProfiles + + m.inMemorySizeOutputSymbols + + m.inMemorySizeOutputIndex + + m.inMemorySizeOutputProfiles +} + +type tenantServiceCompaction struct { + meta *metastorev1.TenantService + ptypes map[string]struct{} + path string // Set at open. + + services []*TenantService + + indexRewriter *indexRewriter + symbolsRewriter *symbolsRewriter + profilesWriter *profilesWriter + + estimates compactionEstimates + samples uint64 + series uint64 + profiles uint64 + + flushOnce sync.Once +} + +func newTenantServiceCompaction(tenantID, name string) *tenantServiceCompaction { + return &tenantServiceCompaction{ + ptypes: make(map[string]struct{}, 10), + meta: &metastorev1.TenantService{ + TenantId: tenantID, + Name: name, + // Updated at append. + MinTime: 0, + MaxTime: 0, + // Updated at writeTo. + TableOfContents: nil, + Size: 0, + ProfileTypes: nil, + }, + } +} + +func (m *tenantServiceCompaction) append(s *TenantService) { + m.services = append(m.services, s) + if m.meta.MinTime == 0 || s.meta.MinTime < m.meta.MinTime { + m.meta.MinTime = s.meta.MinTime + } + if s.meta.MaxTime > m.meta.MaxTime { + m.meta.MaxTime = s.meta.MaxTime + } + for _, pt := range s.meta.ProfileTypes { + m.ptypes[pt] = struct{}{} + } +} + +func (m *tenantServiceCompaction) compact(ctx context.Context, w *Writer) (err error) { + if err = m.open(ctx, w.Dir()); err != nil { + return fmt.Errorf("failed to open sections for compaction: %w", err) + } + defer func() { + err = multierror.New(err, m.cleanup()).Err() + }() + if err = m.mergeAndClose(ctx); err != nil { + return fmt.Errorf("failed to merge profiles: %w", err) + } + if err = m.writeTo(w); err != nil { + return fmt.Errorf("failed to write sections: %w", err) + } + return nil +} + +// TODO(kolesnikovae): +// - Add statistics to the block meta. +// - Measure. Ideally, we should track statistics. +func (m *tenantServiceCompaction) estimate() { + columns := len(schemav1.ProfilesSchema.Columns()) + // Services are to be opened concurrently. + for _, s := range m.services { + s1 := s.sectionSize(SectionSymbols) + // It's likely that both symbols and tsdb sections will + // be heavily deduplicated, so the actual output size will + // be smaller than we estimate – to be deduced later. 
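// A rough worked example of the input-side estimates below: a service with
// a 1 MiB symbols section, a 256 KiB tsdb section and a 4 MiB profiles
// section contributes about 3 MiB for symbols (the x3 multiplier below),
// 256 KiB for the index (next power of two of the section size), and
// 2 * len(columns) * 512 KiB for profiles, since
// estimateReadBufferSize(4 MiB) = 512 KiB.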
+ m.estimates.outputSizeSymbols += s1 + // Both the symbols and the tsdb are loaded into memory entirely. + // It's multiplied here according to experiments. + // https://gist.github.com/kolesnikovae/6f7bdc0b8a14174a8e63485300144b4a + m.estimates.inMemorySizeInputSymbols += s1 * 3 // Pessimistic estimate. + + s2 := s.sectionSize(SectionTSDB) + m.estimates.outputSizeIndex += s2 + // TSDB index is loaded into memory entirely, but is not decoded. + m.estimates.inMemorySizeInputIndex += int64(nextPowerOfTwo(uint32(s2))) + + s3 := s.sectionSize(SectionProfiles) + m.estimates.outputSizeProfiles += s3 + // All columns are to be opened. + // Assuming async read mode – 2 buffers per column: + m.estimates.inMemorySizeInputProfiles += int64(2 * columns * estimateReadBufferSize(s3)) + } + const symbolsDuplicationRatio = 0.5 // Two blocks are likely to have a half of symbols in common. + m.estimates.outputSizeSymbols = int64(float64(m.estimates.outputSizeSymbols) * symbolsDuplicationRatio) + // Duplication of series and profiles is ignored. + + // Output block memory footprint. + m.estimates.inMemorySizeOutputIndex = m.estimates.outputSizeIndex * 8 // A guess. We keep all labels in memory. + m.estimates.inMemorySizeOutputSymbols += m.estimates.outputSizeProfiles * 4 // Mind the lookup table of rewriter. + // This is the most difficult part to estimate. + // Parquet keeps ALL RG pages in memory. We have a limit of 10K rows per RG, + // therefore it's very likely, that the whole table will be loaded into memory, + // plus overhead of memory fragmentation. It's likely impossible to have a + // reasonable estimate here. + const rowSizeGuess = 2 << 10 + // Worst case should be appx ~32MB. If a doubled estimated output size is less than that, use it. + columnBuffers := int64(nextPowerOfTwo(maxRowsPerRowGroup * rowSizeGuess)) + if s := 2 * m.estimates.outputSizeProfiles; s < columnBuffers { + columnBuffers = s + } + pageBuffers := int64(columns * estimatePageBufferSize(m.estimates.outputSizeProfiles)) + m.estimates.inMemorySizeOutputProfiles += columnBuffers + pageBuffers +} + +func (m *tenantServiceCompaction) open(ctx context.Context, path string) (err error) { + m.path = path + defer func() { + if err != nil { + err = multierror.New(err, m.cleanup()).Err() + } + }() + + if err = os.MkdirAll(m.path, 0o777); err != nil { + return err + } + + m.profilesWriter, err = newProfileWriter(m.path, m.estimates.outputSizeProfiles) + if err != nil { + return err + } + + m.indexRewriter = newIndexRewriter(m.path) + m.symbolsRewriter = newSymbolsRewriter(m.path) + + g, ctx := errgroup.WithContext(ctx) + for _, s := range m.services { + s := s + g.Go(util.RecoverPanic(func() error { + if openErr := s.Open(ctx, allSections...); openErr != nil { + return fmt.Errorf("opening tenant service (block %s): %w", s.obj.path, openErr) + } + return nil + })) + } + if err = g.Wait(); err != nil { + merr := multierror.New(err) + for _, s := range m.services { + merr.Add(s.Close()) + } + return merr.Err() + } + + return nil +} + +func (m *tenantServiceCompaction) mergeAndClose(ctx context.Context) (err error) { + defer func() { + err = multierror.New(err, m.close()).Err() + }() + return m.merge(ctx) +} + +func (m *tenantServiceCompaction) merge(ctx context.Context) (err error) { + rows, err := NewMergeRowProfileIterator(m.services) + if err != nil { + return err + } + defer func() { + err = multierror.New(err, rows.Close()).Err() + }() + var i int + for rows.Next() { + if i++; i%1000 == 0 { + if err = ctx.Err(); err != nil { + return 
err + } + } + if err = m.writeRow(rows.At()); err != nil { + return err + } + } + return rows.Err() +} + +func (m *tenantServiceCompaction) writeRow(r ProfileEntry) (err error) { + if err = m.indexRewriter.rewriteRow(r); err != nil { + return err + } + if err = m.symbolsRewriter.rewriteRow(r); err != nil { + return err + } + return m.profilesWriter.writeRow(r) +} + +func (m *tenantServiceCompaction) close() (err error) { + m.flushOnce.Do(func() { + merr := multierror.New() + merr.Add(m.symbolsRewriter.Flush()) + merr.Add(m.indexRewriter.Flush()) + merr.Add(m.profilesWriter.Close()) + m.samples = m.symbolsRewriter.samples + m.series = m.indexRewriter.NumSeries() + m.profiles = m.profilesWriter.profiles + m.symbolsRewriter = nil + m.indexRewriter = nil + m.profilesWriter = nil + // Note that m.services are closed by merge + // iterator as they reach the end of the profile + // table. We do it here again just in case. + // TODO(kolesnikovae): Double check error handling. + m.services = nil + err = merr.Err() + }) + return err +} + +func (m *tenantServiceCompaction) writeTo(w *Writer) (err error) { + off := w.Offset() + m.meta.TableOfContents, err = w.ReadFromFiles( + FileNameProfilesParquet, + block.IndexFilename, + symdb.DefaultFileName, + ) + if err != nil { + return err + } + m.meta.Size = w.Offset() - off + m.meta.ProfileTypes = make([]string, 0, len(m.ptypes)) + for pt := range m.ptypes { + m.meta.ProfileTypes = append(m.meta.ProfileTypes, pt) + } + sort.Strings(m.meta.ProfileTypes) + return nil +} + +func (m *tenantServiceCompaction) cleanup() error { + return os.RemoveAll(m.path) +} + +func newIndexRewriter(path string) *indexRewriter { + return &indexRewriter{ + symbols: make(map[string]struct{}), + path: path, + } +} + +type indexRewriter struct { + series []seriesLabels + symbols map[string]struct{} + chunks []index.ChunkMeta // one chunk per series + previousFp model.Fingerprint + + path string +} + +type seriesLabels struct { + labels phlaremodel.Labels + fingerprint model.Fingerprint +} + +func (rw *indexRewriter) rewriteRow(e ProfileEntry) error { + if rw.previousFp != e.Fingerprint || len(rw.series) == 0 { + series := e.Labels.Clone() + for _, l := range series { + rw.symbols[l.Name] = struct{}{} + rw.symbols[l.Value] = struct{}{} + } + rw.series = append(rw.series, seriesLabels{ + labels: series, + fingerprint: e.Fingerprint, + }) + rw.chunks = append(rw.chunks, index.ChunkMeta{ + MinTime: e.Timestamp, + MaxTime: e.Timestamp, + SeriesIndex: uint32(len(rw.series) - 1), + }) + rw.previousFp = e.Fingerprint + } + rw.chunks[len(rw.chunks)-1].MaxTime = e.Timestamp + e.Row.SetSeriesIndex(rw.chunks[len(rw.chunks)-1].SeriesIndex) + return nil +} + +func (rw *indexRewriter) NumSeries() uint64 { return uint64(len(rw.series)) } + +func (rw *indexRewriter) Flush() error { + w, err := index.NewWriterSize(context.Background(), + filepath.Join(rw.path, block.IndexFilename), + // There is no particular reason to use a buffer (bufio.Writer) + // larger than the default one when writing on disk + 4<<10) + if err != nil { + return err + } + + // Sort symbols + symbols := make([]string, 0, len(rw.symbols)) + for s := range rw.symbols { + symbols = append(symbols, s) + } + sort.Strings(symbols) + + // Add symbols + for _, symbol := range symbols { + if err = w.AddSymbol(symbol); err != nil { + return err + } + } + + // Add Series + for i, series := range rw.series { + if err = w.AddSeries(storage.SeriesRef(i), series.labels, series.fingerprint, rw.chunks[i]); err != nil { + return err + } + } + + 
return w.Close() +} + +type symbolsRewriter struct { + w *symdb.SymDB + rw map[*TenantService]*symdb.Rewriter + samples uint64 + + stacktraces []uint32 +} + +func newSymbolsRewriter(path string) *symbolsRewriter { + return &symbolsRewriter{ + rw: make(map[*TenantService]*symdb.Rewriter), + w: symdb.NewSymDB(symdb.DefaultConfig(). + WithVersion(symdb.FormatV3). + WithDirectory(path)), + } +} + +func (s *symbolsRewriter) rewriteRow(e ProfileEntry) (err error) { + rw := s.rewriterFor(e.TenantService) + e.Row.ForStacktraceIDsValues(func(values []parquet.Value) { + s.loadStacktraceIDs(values) + if err = rw.Rewrite(e.Row.StacktracePartitionID(), s.stacktraces); err != nil { + return + } + s.samples += uint64(len(values)) + for i, v := range values { + values[i] = parquet.Int64Value(int64(s.stacktraces[i])).Level(v.RepetitionLevel(), v.DefinitionLevel(), v.Column()) + } + }) + return err +} + +func (s *symbolsRewriter) rewriterFor(x *TenantService) *symdb.Rewriter { + rw, ok := s.rw[x] + if !ok { + rw = symdb.NewRewriter(s.w, x.Symbols()) + s.rw[x] = rw + } + return rw +} + +func (s *symbolsRewriter) loadStacktraceIDs(values []parquet.Value) { + s.stacktraces = slices.Grow(s.stacktraces[0:], len(values))[:len(values)] + for i := range values { + s.stacktraces[i] = values[i].Uint32() + } +} + +func (s *symbolsRewriter) Flush() error { return s.w.Flush() } diff --git a/pkg/experiment/querybackend/block/compaction_test.go b/pkg/experiment/querybackend/block/compaction_test.go new file mode 100644 index 0000000000..37de05f0d5 --- /dev/null +++ b/pkg/experiment/querybackend/block/compaction_test.go @@ -0,0 +1,38 @@ +package block + +import ( + "context" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/encoding/protojson" + + compactorv1 "github.com/grafana/pyroscope/api/gen/proto/go/compactor/v1" + "github.com/grafana/pyroscope/pkg/objstore/testutil" +) + +func Test_CompactBlocks(t *testing.T) { + ctx := context.Background() + bucket, _ := testutil.NewFilesystemBucket(t, ctx, "testdata") + + var blockMetas compactorv1.CompletedJob // same contract, can break in the future + blockMetasData, err := os.ReadFile("testdata/block-metas.json") + require.NoError(t, err) + err = protojson.Unmarshal(blockMetasData, &blockMetas) + require.NoError(t, err) + + dst, tempdir := testutil.NewFilesystemBucket(t, ctx, t.TempDir()) + compactedBlocks, err := Compact(ctx, blockMetas.Blocks, bucket, + WithCompactionDestination(dst), + WithCompactionTempDir(tempdir), + WithCompactionObjectOptions( + WithObjectDownload(filepath.Join(tempdir, "source")), + WithObjectMaxSizeLoadInMemory(0)), // Force download. + ) + + require.NoError(t, err) + require.Len(t, compactedBlocks, 1) + // TODO: Assertions. 
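// A sketch of assertions that could back the TODO above; the expected
// values are assumptions derived from the testdata (single shard, level-0
// inputs, one "anonymous" tenant):
//
//	b := compactedBlocks[0]
//	require.Equal(t, uint32(1), b.Shard)
//	require.Equal(t, uint32(1), b.CompactionLevel)
//	require.NotEmpty(t, b.TenantServices)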
+} diff --git a/pkg/experiment/querybackend/block/constants.go b/pkg/experiment/querybackend/block/constants.go new file mode 100644 index 0000000000..bba95e2164 --- /dev/null +++ b/pkg/experiment/querybackend/block/constants.go @@ -0,0 +1,82 @@ +package block + +import ( + "github.com/grafana/pyroscope/pkg/tenant" +) + +const ( + DirPathSegment = "segments/" + DirPathBlock = "blocks/" + DirNameAnonTenant = tenant.DefaultTenantID + + FileNameProfilesParquet = "profiles.parquet" + FileNameDataObject = "block.bin" +) + +const ( + defaultObjectSizeLoadInMemory = 1 << 20 + defaultTenantServiceSizeLoadInMemory = 1 << 20 + + maxRowsPerRowGroup = 10 << 10 + symbolsPrefetchSize = 32 << 10 + compactionCopyBufferSize = 32 << 10 +) + +func estimateReadBufferSize(s int64) int { + const minSize = 64 << 10 + const maxSize = 1 << 20 + // Parquet has global buffer map, where buffer size is key, + // so we want a low cardinality here. + e := nextPowerOfTwo(uint32(s / 10)) + if e < minSize { + return minSize + } + return int(min(e, maxSize)) +} + +// This is a verbatim copy of estimateReadBufferSize. +// It's kept for the sake of clarity and to avoid confusion. +func estimatePageBufferSize(s int64) int { + const minSize = 64 << 10 + const maxSize = 1 << 20 + e := nextPowerOfTwo(uint32(s / 10)) + if e < minSize { + return minSize + } + return int(min(e, maxSize)) +} + +func estimateFooterSize(size int64) int64 { + var s int64 + // as long as we don't keep the exact footer sizes in the meta estimate it + if size > 0 { + s = size / 10000 + } + // set a minimum footer size of 32KiB + if s < 32<<10 { + s = 32 << 10 + } + // set a maximum footer size of 512KiB + if s > 512<<10 { + s = 512 << 10 + } + // now check clamp it to the actual size of the whole object + if s > size { + s = size + } + return s +} + +func nextPowerOfTwo(n uint32) uint32 { + if n == 0 { + return 1 + } + n-- + n |= n >> 1 + n |= n >> 2 + n |= n >> 4 + n |= n >> 8 + n |= n >> 16 + n++ + return n +} diff --git a/pkg/experiment/querybackend/block/object.go b/pkg/experiment/querybackend/block/object.go new file mode 100644 index 0000000000..6900e61f22 --- /dev/null +++ b/pkg/experiment/querybackend/block/object.go @@ -0,0 +1,243 @@ +package block + +import ( + "context" + "fmt" + "path/filepath" + "strconv" + "strings" + + "github.com/grafana/dskit/multierror" + "golang.org/x/sync/errgroup" + + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + "github.com/grafana/pyroscope/pkg/objstore" + "github.com/grafana/pyroscope/pkg/util" + "github.com/grafana/pyroscope/pkg/util/bufferpool" + "github.com/grafana/pyroscope/pkg/util/refctr" +) + +// TODO Next: +// - Buffer pool. +// - In-memory threshold option. +// - Store the object size in metadata. +// - Separate storages for segments and compacted blocks. +// - Local cache? Useful for all-in-one deployments. +// - Distributed cache. + +type Section uint32 + +const ( + // Table of contents sections. + _ Section = iota + SectionProfiles + SectionTSDB + SectionSymbols +) + +var allSections = []Section{ + SectionProfiles, + SectionTSDB, + SectionSymbols, +} + +var ( + // Version-specific. 
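	// Index 0 is reserved as invalid; for format version 1 a tenant service
	// lays out its sections in the order profiles, tsdb, symbols, matching
	// the three entries of its table of contents.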
+ sectionNames = [...][]string{1: {"invalid", "profiles", "tsdb", "symbols"}} + sectionIndices = [...][]int{1: {-1, 0, 1, 2}} +) + +func (sc Section) open(ctx context.Context, s *TenantService) (err error) { + switch sc { + case SectionTSDB: + return openTSDB(ctx, s) + case SectionSymbols: + return openSymbols(ctx, s) + case SectionProfiles: + return openProfileTable(ctx, s) + default: + panic(fmt.Sprintf("bug: unknown section: %d", sc)) + } +} + +// Object represents a block or a segment in the object storage. +type Object struct { + path string + meta *metastorev1.BlockMeta + storage objstore.BucketReader + local *objstore.ReadOnlyFile + + refs refctr.Counter + buf *bufferpool.Buffer + err error + + memSize int + downloadDir string +} + +type ObjectOption func(*Object) + +func WithObjectPath(path string) ObjectOption { + return func(obj *Object) { + obj.path = path + } +} + +func WithObjectMaxSizeLoadInMemory(size int) ObjectOption { + return func(obj *Object) { + obj.memSize = size + } +} + +func WithObjectDownload(dir string) ObjectOption { + return func(obj *Object) { + obj.downloadDir = dir + } +} + +func NewObject(storage objstore.Bucket, meta *metastorev1.BlockMeta, opts ...ObjectOption) *Object { + o := &Object{ + storage: storage, + meta: meta, + path: ObjectPath(meta), + memSize: defaultObjectSizeLoadInMemory, + } + for _, opt := range opts { + opt(o) + } + return o +} + +func ObjectPath(md *metastorev1.BlockMeta) string { + topLevel := DirPathBlock + tenantDirName := md.TenantId + if md.CompactionLevel == 0 { + topLevel = DirPathSegment + tenantDirName = DirNameAnonTenant + } + var b strings.Builder + b.WriteString(topLevel) + b.WriteString(strconv.Itoa(int(md.Shard))) + b.WriteByte('/') + b.WriteString(tenantDirName) + b.WriteByte('/') + b.WriteString(md.Id) + b.WriteByte('/') + b.WriteString(FileNameDataObject) + return b.String() +} + +// Open opens the object, loading the data into memory if it's small enough. +// +// Open may be called multiple times concurrently, but the +// object is only initialized once. While it is possible to open +// the object repeatedly after close, the caller must pass the +// failure reason to the "CloseWithError" call, preventing further +// use, if applicable. +func (obj *Object) Open(ctx context.Context) error { + return obj.refs.IncErr(func() error { + return obj.open(ctx) + }) +} + +func (obj *Object) open(ctx context.Context) (err error) { + if obj.err != nil { + // In case if the object has been already closed with an error, + // and then released, return the error immediately. + return obj.err + } + if len(obj.meta.TenantServices) == 0 { + return nil + } + // Estimate the size of the sections to process, and load the + // data into memory, if it's small enough. + if obj.meta.Size > uint64(obj.memSize) { + // Otherwise, download the object to the local directory, + // if it's specified, and use the local file. + if obj.downloadDir != "" { + return obj.Download(ctx) + } + // The object will be read from the storage directly. 
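// To summarize the strategy: with the defaults, an object of up to 1 MiB
// (defaultObjectSizeLoadInMemory) is fetched into a pooled buffer in full;
// a larger object is either downloaded to the local download directory, or
// read directly from object storage with range requests per section.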
+ return nil + } + obj.buf = bufferpool.GetBuffer(int(obj.meta.Size)) + defer func() { + if err != nil { + _ = obj.closeErr(err) + } + }() + if err = objstore.ReadRange(ctx, obj.buf, obj.path, obj.storage, 0, int64(obj.meta.Size)); err != nil { + return fmt.Errorf("loading object into memory %s: %w", obj.path, err) + } + return nil +} + +func (obj *Object) Close() error { + return obj.CloseWithError(nil) +} + +// CloseWithError closes the object, releasing all the acquired resources, +// once the last reference is released. If the provided error is not nil, +// the object will be marked as failed, preventing any further use. +func (obj *Object) CloseWithError(err error) (closeErr error) { + obj.refs.Dec(func() { + closeErr = obj.closeErr(err) + }) + return closeErr +} + +func (obj *Object) closeErr(err error) (closeErr error) { + obj.err = err + if obj.buf != nil { + bufferpool.Put(obj.buf) + obj.buf = nil + } + if obj.local != nil { + closeErr = obj.local.Close() + obj.local = nil + } + return closeErr +} + +func (obj *Object) Meta() *metastorev1.BlockMeta { return obj.meta } + +func (obj *Object) Download(ctx context.Context) error { + dir := filepath.Join(obj.downloadDir, obj.meta.Id) + local, err := objstore.Download(ctx, obj.path, obj.storage, dir) + if err != nil { + return err + } + obj.storage = local + obj.local = local + return nil +} + +// ObjectsFromMetas binds block metas to corresponding objects in the storage. +func ObjectsFromMetas(storage objstore.Bucket, blocks []*metastorev1.BlockMeta, options ...ObjectOption) Objects { + objects := make([]*Object, len(blocks)) + for i, m := range blocks { + objects[i] = NewObject(storage, m, options...) + } + return objects +} + +type Objects []*Object + +func (s Objects) Open(ctx context.Context) error { + g, ctx := errgroup.WithContext(ctx) + for i := range s { + i := i + g.Go(util.RecoverPanic(func() error { + return s[i].Open(ctx) + })) + } + return g.Wait() +} + +func (s Objects) Close() error { + var m multierror.MultiError + for i := range s { + m.Add(s[i].Close()) + } + return m.Err() +} diff --git a/pkg/experiment/querybackend/block/section_profiles.go b/pkg/experiment/querybackend/block/section_profiles.go new file mode 100644 index 0000000000..ea16ecb5d1 --- /dev/null +++ b/pkg/experiment/querybackend/block/section_profiles.go @@ -0,0 +1,389 @@ +package block + +import ( + "context" + "encoding/binary" + "fmt" + "io" + "math" + "os" + "path/filepath" + + "github.com/parquet-go/parquet-go" + "github.com/pkg/errors" + "github.com/prometheus/common/model" + + "github.com/grafana/pyroscope/pkg/iter" + phlaremodel "github.com/grafana/pyroscope/pkg/model" + "github.com/grafana/pyroscope/pkg/objstore" + phlareparquet "github.com/grafana/pyroscope/pkg/parquet" + "github.com/grafana/pyroscope/pkg/phlaredb" + "github.com/grafana/pyroscope/pkg/phlaredb/query" + schemav1 "github.com/grafana/pyroscope/pkg/phlaredb/schemas/v1" + "github.com/grafana/pyroscope/pkg/phlaredb/tsdb/index" + "github.com/grafana/pyroscope/pkg/util/bufferpool" + "github.com/grafana/pyroscope/pkg/util/build" + "github.com/grafana/pyroscope/pkg/util/loser" +) + +func openProfileTable(_ context.Context, s *TenantService) (err error) { + offset := s.sectionOffset(SectionProfiles) + size := s.sectionSize(SectionProfiles) + if buf := s.inMemoryBuffer(); buf != nil { + offset -= int64(s.offset()) + s.profiles, err = openParquetFile( + s.inMemoryBucket(buf), s.obj.path, offset, size, + 0, // Do not prefetch the footer. 
+ parquet.SkipBloomFilters(true), + parquet.FileReadMode(parquet.ReadModeSync), + parquet.ReadBufferSize(4<<10)) + } else { + s.profiles, err = openParquetFile( + s.obj.storage, s.obj.path, offset, size, + estimateFooterSize(size), + parquet.SkipBloomFilters(true), + parquet.FileReadMode(parquet.ReadModeAsync), + parquet.ReadBufferSize(estimateReadBufferSize(size))) + } + if err != nil { + return fmt.Errorf("opening profile parquet table: %w", err) + } + return nil +} + +type ParquetFile struct { + *parquet.File + + reader objstore.ReaderAtCloser + cancel context.CancelFunc + + storage objstore.BucketReader + path string + off int64 + size int64 +} + +func openParquetFile( + storage objstore.BucketReader, + path string, + offset, size, footerSize int64, + options ...parquet.FileOption, +) (p *ParquetFile, err error) { + // The context is used for GetRange calls and should not + // be canceled until the parquet file is closed. + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + if err != nil { + cancel() + } + }() + + p = &ParquetFile{ + cancel: cancel, + storage: storage, + path: path, + off: offset, + size: size, + } + + r, err := storage.ReaderAt(ctx, path) + if err != nil { + return nil, fmt.Errorf("creating object reader: %w", err) + } + + var ra io.ReaderAt + ra = io.NewSectionReader(r, offset, size) + if footerSize > 0 { + buf := bufferpool.GetBuffer(int(footerSize)) + defer func() { + // Footer is not used after the file was opened. + bufferpool.Put(buf) + }() + if err = p.fetchFooter(ctx, buf, footerSize); err != nil { + return nil, err + } + rf := newReaderWithFooter(ra, buf.B, size) + defer rf.free() + ra = rf + } + + f, err := parquet.OpenFile(ra, size, options...) + if err != nil { + return nil, err + } + + p.reader = r + p.File = f + return p, nil +} + +func (f *ParquetFile) RowReader() *parquet.Reader { + return parquet.NewReader(f.File, schemav1.ProfilesSchema) +} + +func (f *ParquetFile) fetchFooter(ctx context.Context, buf *bufferpool.Buffer, estimatedSize int64) error { + // Fetch the footer of estimated size at the estimated offset. + estimatedOffset := f.off + f.size - estimatedSize + if err := objstore.ReadRange(ctx, buf, f.path, f.storage, estimatedOffset, estimatedSize); err != nil { + return err + } + // Footer size is an uint32 located at size-8. + sb := buf.B[len(buf.B)-8 : len(buf.B)-4] + s := int64(binary.LittleEndian.Uint32(sb)) + s += 8 // Include the footer size itself and the magic signature. + if estimatedSize >= s { + // The footer has been fetched. + return nil + } + // Fetch footer to buf for sure. 
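// The estimate is size/10000 clamped to [32 KiB, 512 KiB] and to the
// section size (estimateFooterSize), so only profile sections larger than
// roughly 328 MB get more than the 32 KiB minimum. If the real footer turns
// out to be larger than the estimate, its exact length is taken from the
// trailing 8 bytes and the footer is re-fetched below.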
+ return objstore.ReadRange(ctx, buf, f.path, f.storage, f.off+f.size-s, s) +} + +func (f *ParquetFile) Close() error { + if f.cancel != nil { + f.cancel() + } + if f.reader != nil { + return f.reader.Close() + } + return nil +} + +func (f *ParquetFile) Column(ctx context.Context, columnName string, predicate query.Predicate) query.Iterator { + idx, _ := query.GetColumnIndexByPath(f.Root(), columnName) + if idx == -1 { + return query.NewErrIterator(fmt.Errorf("column '%s' not found in parquet table", columnName)) + } + return query.NewSyncIterator(ctx, f.RowGroups(), idx, columnName, 1<<10, predicate, columnName) +} + +type profilesWriter struct { + *parquet.GenericWriter[*schemav1.Profile] + file *os.File + buf []parquet.Row + profiles uint64 +} + +func newProfileWriter(dst string, sizeTotal int64) (*profilesWriter, error) { + f, err := os.Create(filepath.Join(dst, FileNameProfilesParquet)) + if err != nil { + return nil, err + } + return &profilesWriter{ + file: f, + buf: make([]parquet.Row, 1), + GenericWriter: parquet.NewGenericWriter[*schemav1.Profile](f, + parquet.CreatedBy("github.com/grafana/pyroscope/", build.Version, build.Revision), + parquet.PageBufferSize(estimatePageBufferSize(sizeTotal)), + // Note that parquet keeps ALL RG pages in memory (ColumnPageBuffers). + parquet.MaxRowsPerRowGroup(maxRowsPerRowGroup), + schemav1.ProfilesSchema, + // parquet.ColumnPageBuffers(), + ), + }, nil +} + +func (p *profilesWriter) writeRow(e ProfileEntry) error { + p.buf[0] = parquet.Row(e.Row) + _, err := p.GenericWriter.WriteRows(p.buf) + p.profiles++ + return err +} + +func (p *profilesWriter) Close() error { + err := p.GenericWriter.Close() + if err != nil { + return err + } + return p.file.Close() +} + +type readerWithFooter struct { + reader io.ReaderAt + footer []byte + offset int64 + size int64 +} + +func newReaderWithFooter(r io.ReaderAt, footer []byte, size int64) *readerWithFooter { + footerSize := int64(len(footer)) + footerOffset := size - footerSize + return &readerWithFooter{ + reader: r, + footer: footer, + offset: footerOffset, + size: footerSize, + } +} + +func (f *readerWithFooter) hitsHeaderMagic(off, length int64) bool { + return off == 0 && length == 4 +} + +func (f *readerWithFooter) hitsFooter(off, length int64) bool { + return length <= f.size && off >= f.offset && off+length <= f.offset+f.size +} + +var parquetMagic = []byte("PAR1") + +func (f *readerWithFooter) free() { + f.footer = nil + f.size = -1 +} + +func (f *readerWithFooter) ReadAt(p []byte, off int64) (n int, err error) { + if f.hitsHeaderMagic(off, int64(len(p))) { + copy(p, parquetMagic) + return len(p), nil + } + if f.hitsFooter(off, int64(len(p))) { + copy(p, f.footer[off-f.offset:]) + return len(p), nil + } + return f.reader.ReadAt(p, off) +} + +type ProfileEntry struct { + TenantService *TenantService + + Timestamp int64 + Fingerprint model.Fingerprint + Labels phlaremodel.Labels + Row schemav1.ProfileRow +} + +func NewMergeRowProfileIterator(src []*TenantService) (iter.Iterator[ProfileEntry], error) { + its := make([]iter.Iterator[ProfileEntry], len(src)) + for i, s := range src { + it, err := NewProfileRowIterator(s) + if err != nil { + return nil, err + } + its[i] = it + } + if len(its) == 1 { + return its[0], nil + } + return &DedupeProfileRowIterator{ + Iterator: iter.NewTreeIterator(loser.New( + its, + ProfileEntry{ + Timestamp: math.MaxInt64, + }, + func(it iter.Iterator[ProfileEntry]) ProfileEntry { return it.At() }, + func(r1, r2 ProfileEntry) bool { + // first handle max profileRow if it's 
either r1 or r2 + if r1.Timestamp == math.MaxInt64 { + return false + } + if r2.Timestamp == math.MaxInt64 { + return true + } + // then handle normal profileRows + if cmp := phlaremodel.CompareLabelPairs(r1.Labels, r2.Labels); cmp != 0 { + return cmp < 0 + } + return r1.Timestamp < r2.Timestamp + }, + func(it iter.Iterator[ProfileEntry]) { _ = it.Close() }, + )), + }, nil +} + +type DedupeProfileRowIterator struct { + iter.Iterator[ProfileEntry] + + prevFP model.Fingerprint + prevTimeNanos int64 +} + +func (it *DedupeProfileRowIterator) Next() bool { + for { + if !it.Iterator.Next() { + return false + } + currentProfile := it.Iterator.At() + if it.prevFP == currentProfile.Fingerprint && it.prevTimeNanos == currentProfile.Timestamp { + // skip duplicate profile + continue + } + it.prevFP = currentProfile.Fingerprint + it.prevTimeNanos = currentProfile.Timestamp + return true + } +} + +type profileRowIterator struct { + reader *TenantService + index phlaredb.IndexReader + profiles iter.Iterator[parquet.Row] + allPostings index.Postings + err error + + currentRow ProfileEntry + currentSeriesIdx uint32 + chunks []index.ChunkMeta +} + +func NewProfileRowIterator(s *TenantService) (iter.Iterator[ProfileEntry], error) { + k, v := index.AllPostingsKey() + tsdb := s.Index() + allPostings, err := tsdb.Postings(k, nil, v) + if err != nil { + return nil, err + } + return &profileRowIterator{ + reader: s, + index: tsdb, + profiles: phlareparquet.NewBufferedRowReaderIterator(s.ProfileRowReader(), 4), + allPostings: allPostings, + currentSeriesIdx: math.MaxUint32, + chunks: make([]index.ChunkMeta, 1), + }, nil +} + +func (p *profileRowIterator) At() ProfileEntry { + return p.currentRow +} + +func (p *profileRowIterator) Next() bool { + if !p.profiles.Next() { + return false + } + p.currentRow.TenantService = p.reader + p.currentRow.Row = schemav1.ProfileRow(p.profiles.At()) + seriesIndex := p.currentRow.Row.SeriesIndex() + p.currentRow.Timestamp = p.currentRow.Row.TimeNanos() + // do we have a new series? 
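// Series indices are assigned sequentially at write time (see
// indexRewriter.rewriteRow), so rows arrive grouped by series in index
// order; the all-postings iterator is therefore advanced exactly once per
// new series to resolve its labels and fingerprint from the tsdb index.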
+ if seriesIndex == p.currentSeriesIdx { + return true + } + p.currentSeriesIdx = seriesIndex + if !p.allPostings.Next() { + if err := p.allPostings.Err(); err != nil { + p.err = err + return false + } + p.err = errors.New("unexpected end of postings") + return false + } + + fp, err := p.index.Series(p.allPostings.At(), &p.currentRow.Labels, &p.chunks) + if err != nil { + p.err = err + return false + } + p.currentRow.Fingerprint = model.Fingerprint(fp) + return true +} + +func (p *profileRowIterator) Err() error { + if p.err != nil { + return p.err + } + return p.profiles.Err() +} + +func (p *profileRowIterator) Close() error { + return p.reader.Close() +} diff --git a/pkg/experiment/querybackend/block/section_symbols.go b/pkg/experiment/querybackend/block/section_symbols.go new file mode 100644 index 0000000000..0b8701ef2d --- /dev/null +++ b/pkg/experiment/querybackend/block/section_symbols.go @@ -0,0 +1,24 @@ +package block + +import ( + "context" + "fmt" + + "github.com/grafana/pyroscope/pkg/phlaredb/symdb" +) + +func openSymbols(ctx context.Context, s *TenantService) (err error) { + offset := s.sectionOffset(SectionSymbols) + size := s.sectionSize(SectionSymbols) + if buf := s.inMemoryBuffer(); buf != nil { + offset -= int64(s.offset()) + s.symbols, err = symdb.OpenObject(ctx, s.inMemoryBucket(buf), s.obj.path, offset, size) + } else { + s.symbols, err = symdb.OpenObject(ctx, s.obj.storage, s.obj.path, offset, size, + symdb.WithPrefetchSize(symbolsPrefetchSize)) + } + if err != nil { + return fmt.Errorf("opening symbols: %w", err) + } + return nil +} diff --git a/pkg/experiment/querybackend/block/section_tsdb.go b/pkg/experiment/querybackend/block/section_tsdb.go new file mode 100644 index 0000000000..cc365a48b2 --- /dev/null +++ b/pkg/experiment/querybackend/block/section_tsdb.go @@ -0,0 +1,49 @@ +package block + +import ( + "context" + "fmt" + + "github.com/grafana/pyroscope/pkg/objstore" + "github.com/grafana/pyroscope/pkg/phlaredb/tsdb/index" + "github.com/grafana/pyroscope/pkg/util/bufferpool" +) + +func openTSDB(ctx context.Context, s *TenantService) (err error) { + offset := s.sectionOffset(SectionTSDB) + size := s.sectionSize(SectionTSDB) + s.tsdb = new(tsdbBuffer) + defer func() { + if err != nil { + _ = s.tsdb.Close() + } + }() + if buf := s.inMemoryBuffer(); buf != nil { + offset -= int64(s.offset()) + s.tsdb.index, err = index.NewReader(index.RealByteSlice(buf[offset : offset+size])) + } else { + s.tsdb.buf = bufferpool.GetBuffer(int(size)) + if err = objstore.ReadRange(ctx, s.tsdb.buf, s.obj.path, s.obj.storage, offset, size); err == nil { + s.tsdb.index, err = index.NewReader(index.RealByteSlice(s.tsdb.buf.B)) + } + } + if err != nil { + return fmt.Errorf("opening tsdb: %w", err) + } + return nil +} + +type tsdbBuffer struct { + index *index.Reader + buf *bufferpool.Buffer +} + +func (b *tsdbBuffer) Close() (err error) { + if b.buf != nil { + bufferpool.Put(b.buf) + } + if b.index != nil { + err = b.index.Close() + } + return err +} diff --git a/pkg/experiment/querybackend/block/tenant_service.go b/pkg/experiment/querybackend/block/tenant_service.go new file mode 100644 index 0000000000..333eb2c149 --- /dev/null +++ b/pkg/experiment/querybackend/block/tenant_service.go @@ -0,0 +1,209 @@ +package block + +import ( + "context" + "fmt" + + "github.com/grafana/dskit/multierror" + "github.com/parquet-go/parquet-go" + "golang.org/x/sync/errgroup" + + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + "github.com/grafana/pyroscope/pkg/objstore" + 
"github.com/grafana/pyroscope/pkg/objstore/providers/memory" + "github.com/grafana/pyroscope/pkg/phlaredb" + "github.com/grafana/pyroscope/pkg/phlaredb/symdb" + "github.com/grafana/pyroscope/pkg/util" + "github.com/grafana/pyroscope/pkg/util/bufferpool" + "github.com/grafana/pyroscope/pkg/util/refctr" +) + +type TenantService struct { + meta *metastorev1.TenantService + obj *Object + + refs refctr.Counter + buf *bufferpool.Buffer + err error + + tsdb *tsdbBuffer + symbols *symdb.Reader + profiles *ParquetFile + + memSize int +} + +func NewTenantService(meta *metastorev1.TenantService, obj *Object) *TenantService { + return &TenantService{ + meta: meta, + obj: obj, + memSize: defaultTenantServiceSizeLoadInMemory, + } +} + +type TenantServiceOption func(*TenantService) + +func WithTenantServiceMaxSizeLoadInMemory(size int) TenantServiceOption { + return func(s *TenantService) { + s.memSize = size + } +} + +// Open opens the service, initializing the sections specified. +// +// Open may be called multiple times concurrently, but the service +// is only initialized once. While it is possible to open the service +// repeatedly after close, the caller must pass the failure reason to +// the CloseWithError call, preventing further use, if applicable. +func (s *TenantService) Open(ctx context.Context, sections ...Section) (err error) { + return s.refs.IncErr(func() error { + return s.open(ctx, sections...) + }) +} + +func (s *TenantService) open(ctx context.Context, sections ...Section) (err error) { + if s.err != nil { + // The tenant service has been already closed with an error. + return s.err + } + if err = s.obj.Open(ctx); err != nil { + return fmt.Errorf("failed to open object: %w", err) + } + defer func() { + // Close the object here because the tenant service won't be + // closed if it fails to open. + if err != nil { + _ = s.closeErr(err) + } + }() + if s.obj.buf == nil && s.meta.Size < uint64(s.memSize) { + s.buf = bufferpool.GetBuffer(int(s.meta.Size)) + off, size := int64(s.offset()), int64(s.meta.Size) + if err = objstore.ReadRange(ctx, s.buf, s.obj.path, s.obj.storage, off, size); err != nil { + return fmt.Errorf("loading sections into memory: %w", err) + } + } + g, ctx := errgroup.WithContext(ctx) + for _, sc := range sections { + sc := sc + g.Go(util.RecoverPanic(func() error { + if openErr := sc.open(ctx, s); openErr != nil { + return fmt.Errorf("openning section %v: %w", s.sectionName(sc), openErr) + } + return nil + })) + } + return g.Wait() +} + +func (s *TenantService) Close() error { return s.CloseWithError(nil) } + +// CloseWithError closes the tenant service and disposes all the resources +// associated with it. +// +// Any further attempts to open the service will return the provided error. 
+func (s *TenantService) CloseWithError(err error) (closeErr error) { + s.refs.Dec(func() { + closeErr = s.closeErr(err) + }) + return closeErr +} + +func (s *TenantService) closeErr(err error) error { + s.err = err + if s.buf != nil { + bufferpool.Put(s.buf) + s.buf = nil + } + var merr multierror.MultiError + if s.tsdb != nil { + merr.Add(s.tsdb.Close()) + } + if s.symbols != nil { + merr.Add(s.symbols.Close()) + } + if s.profiles != nil { + merr.Add(s.profiles.Close()) + } + if s.obj != nil { + merr.Add(s.obj.CloseWithError(err)) + } + return merr.Err() +} + +func (s *TenantService) Meta() *metastorev1.TenantService { return s.meta } + +func (s *TenantService) Profiles() *ParquetFile { return s.profiles } + +func (s *TenantService) ProfileRowReader() parquet.RowReader { return s.profiles.RowReader() } + +func (s *TenantService) Symbols() symdb.SymbolsReader { return s.symbols } + +func (s *TenantService) Index() phlaredb.IndexReader { return s.tsdb.index } + +// Offset of the tenant service section within the object. +func (s *TenantService) offset() uint64 { return s.meta.TableOfContents[0] } + +func (s *TenantService) sectionIndex(sc Section) int { + var n []int + switch s.obj.meta.FormatVersion { + default: + n = sectionIndices[1] + } + if int(sc) >= len(n) { + panic(fmt.Sprintf("bug: invalid section index: %d (total: %d)", sc, len(n))) + } + return n[sc] +} + +func (s *TenantService) sectionName(sc Section) string { + var n []string + switch s.obj.meta.FormatVersion { + default: + n = sectionNames[1] + } + if int(sc) >= len(n) { + panic(fmt.Sprintf("bug: invalid section index: %d (total: %d)", sc, len(n))) + } + return n[sc] +} + +func (s *TenantService) sectionOffset(sc Section) int64 { + return int64(s.meta.TableOfContents[s.sectionIndex(sc)]) +} + +func (s *TenantService) sectionSize(sc Section) int64 { + idx := s.sectionIndex(sc) + off := s.meta.TableOfContents[idx] + var next uint64 + if idx == len(s.meta.TableOfContents)-1 { + next = s.offset() + s.meta.Size + } else { + next = s.meta.TableOfContents[idx+1] + } + return int64(next - off) +} + +func (s *TenantService) inMemoryBuffer() []byte { + if s.obj.buf != nil { + // If the entire object is loaded into memory, + // return the tenant service sub-slice. + lo := s.offset() + hi := lo + s.meta.Size + buf := s.obj.buf.B + return buf[lo:hi] + } + if s.buf != nil { + // Otherwise, if the tenant service is loaded into memory + // individually, return the buffer. + return s.buf.B + } + // Otherwise, the tenant service is not loaded into memory. 
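// The sub-slicing above, as well as sectionOffset and sectionSize, is
// driven by the service's table of contents. For example, the first service
// in testdata/block-metas.json has tableOfContents [0, 4549, 7747] and size
// 19471, so profiles occupy bytes [0, 4549), tsdb [4549, 7747) and symbols
// [7747, 19471) of the object.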
+ return nil +} + +func (s *TenantService) inMemoryBucket(buf []byte) objstore.Bucket { + bucket := memory.NewInMemBucket() + bucket.Set(s.obj.path, buf) + return objstore.NewBucket(bucket) +} diff --git a/pkg/experiment/querybackend/block/testdata/.gitignore b/pkg/experiment/querybackend/block/testdata/.gitignore new file mode 100644 index 0000000000..7ad764b60f --- /dev/null +++ b/pkg/experiment/querybackend/block/testdata/.gitignore @@ -0,0 +1 @@ +blocks/ diff --git a/pkg/experiment/querybackend/block/testdata/block-metas.json b/pkg/experiment/querybackend/block/testdata/block-metas.json new file mode 100644 index 0000000000..5d6fc93fdd --- /dev/null +++ b/pkg/experiment/querybackend/block/testdata/block-metas.json @@ -0,0 +1,216 @@ +{ + "blocks": [ + { + "id": "01J2VJR31PT3X4NDJC4Q2BHWQ1", + "minTime": "1721060035611", + "maxTime": "1721060035611", + "shard": 1, + "tenantServices": [ + { + "tenantId": "anonymous", + "name": "pyroscope-test/query-frontend", + "minTime": "1721060035611", + "maxTime": "1721060035611", + "tableOfContents": [ + "0", + "4549", + "7747" + ], + "size": "19471" + } + ] + }, + { + "id": "01J2VJQPYDC160REPAD2VN88XN", + "minTime": "1721060023235", + "maxTime": "1721060023235", + "shard": 1, + "tenantServices": [ + { + "tenantId": "anonymous", + "name": "pyroscope-test/ingester", + "minTime": "1721060023235", + "maxTime": "1721060023235", + "tableOfContents": [ + "0", + "3794", + "6281" + ], + "size": "22242" + } + ] + }, + { + "id": "01J2VJQRGBK8YFWVV8K1MPRRWM", + "minTime": "1721060010831", + "maxTime": "1721060010831", + "shard": 1, + "tenantServices": [ + { + "tenantId": "anonymous", + "name": "pyroscope-test/alloy", + "minTime": "1721060010831", + "maxTime": "1721060010831", + "tableOfContents": [ + "0", + "3949", + "6565" + ], + "size": "17664" + } + ] + }, + { + "id": "01J2VJQRTMSCY4VDYBP5N4N5JK", + "minTime": "1721060025159", + "maxTime": "1721060025159", + "shard": 1, + "tenantServices": [ + { + "tenantId": "anonymous", + "name": "pyroscope-test/query-frontend", + "minTime": "1721060025159", + "maxTime": "1721060025159", + "tableOfContents": [ + "0", + "3834", + "6029" + ], + "size": "21765" + } + ] + }, + { + "id": "01J2VJQTJ3PGF7KB39ARR1BX3Y", + "minTime": "1721060026913", + "maxTime": "1721060026913", + "shard": 1, + "tenantServices": [ + { + "tenantId": "anonymous", + "name": "pyroscope-test/ingester", + "minTime": "1721060026913", + "maxTime": "1721060026913", + "tableOfContents": [ + "0", + "4808", + "8321" + ], + "size": "28169" + } + ] + }, + { + "id": "01J2VJQV544TF571FDSK2H692P", + "minTime": "1721060013534", + "maxTime": "1721060013534", + "shard": 1, + "tenantServices": [ + { + "tenantId": "anonymous", + "name": "pyroscope-test/query-frontend", + "minTime": "1721060013534", + "maxTime": "1721060013534", + "tableOfContents": [ + "0", + "4201", + "6780" + ], + "size": "15785" + } + ] + }, + { + "id": "01J2VJQX8DYHSEBK7BAQSCJBMG", + "minTime": "1721060015603", + "maxTime": "1721060015603", + "shard": 1, + "tenantServices": [ + { + "tenantId": "anonymous", + "name": "pyroscope-test/ingester", + "minTime": "1721060015603", + "maxTime": "1721060015603", + "tableOfContents": [ + "0", + "4543", + "7406" + ], + "size": "27431" + } + ] + }, + { + "id": "01J2VJQYQVZTPZMMJKE7F2XC47", + "minTime": "1721060031203", + "maxTime": "1721060031203", + "shard": 1, + "tenantServices": [ + { + "tenantId": "anonymous", + "name": "pyroscope-test/ingester", + "minTime": "1721060031203", + "maxTime": "1721060031203", + "tableOfContents": [ + "0", + "5086", + "8600" + ], 
+ "size": "36655" + } + ] + }, + { + "id": "01J2VJQZPARDJQ779S1JMV0XQA", + "minTime": "1721060032190", + "maxTime": "1721060032190", + "shard": 1, + "tenantServices": [ + { + "tenantId": "anonymous", + "name": "pyroscope-test/ingester", + "minTime": "1721060032190", + "maxTime": "1721060032190", + "tableOfContents": [ + "0", + "3825", + "6312" + ], + "size": "24273" + } + ] + }, + { + "id": "01J2VJR0R3NQS23SDADNA6XHCM", + "minTime": "1721060033248", + "maxTime": "1721060033802", + "shard": 1, + "tenantServices": [ + { + "tenantId": "anonymous", + "name": "pyroscope-test/alloy", + "minTime": "1721060033248", + "maxTime": "1721060033248", + "tableOfContents": [ + "0", + "4547", + "7152" + ], + "size": "27397" + }, + { + "tenantId": "anonymous", + "name": "pyroscope-test/ingester", + "minTime": "1721060033680", + "maxTime": "1721060033802", + "tableOfContents": [ + "27397", + "32839", + "37073" + ], + "size": "50561" + } + ] + } + ] +} diff --git a/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQPYDC160REPAD2VN88XN/block.bin b/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQPYDC160REPAD2VN88XN/block.bin new file mode 100644 index 0000000000000000000000000000000000000000..871f44e7b8131ec375077838a88fad0a5ff3b9bf GIT binary patch literal 22242 zcmeG^33L=ywpB0tLP$coNg!fC2uUE_*;x#bga8Qy2wB*%?Q~a?CcW89AV>riTyUbI z5fub6f(tkbiinQmzTlt^D&qKc{)`UdxcdQ@L1y^(zE@RU-CZ3L)c^mS^E>J8S8uuZ zzWeU>?yKtB^16IYq$UIYPQ3oZUkBD`h=!wYPQxX0+{pj>I6h**^sDzqy!p{zc3jjB zePiKoJf@sU?Y#Kdsi!dtbr&r=>e z+%s*fcJ|_Ze}AWET9-C=vBxs+-WeN*$uGd$1b>^3{r->fyPT(!hAOUC4(Ad~0&f$NzV>qs04SrBMk~PVi@9ukG1FQ?9IMJWyv{Qm6LG$Cw z>u2KH;UCTlrGTIu^sz^xCQ?~mD3C4pq@MoK{_$YoUO4=ne*ULtsE{Bc zPDXUjR-DL@gPU)SRT3FaiS&`#3xc#{E-f`NPzO}1G+dgN<7ju7JCsuCm!dkl=QC5+ zh9lFH(^BQ4vUwQ}gBLVQMP+jsYsKaYo|To&Vih)nBG8xA%tM`6h0RhCo1H-M|IudB zJ89UAvytRPQS8IK`vANbww!x(x;9#~5afYNOV?hisSry&yrxbpp5ir?V(|dce9MdF z2f{U*rSc0Aa_{a)xp>@HJP;+vA+(tb!($1AWiXaH*ua()uOL4K%R~F5~J2mql<_+s(qtQA27T z8k7O*g(|w1ht@&$9;2zv<1(6rTBFNj@mL)8QR&dDOxY{8!QnAl=NqkF!Pi*KrAE~o zZBDD;22_zXr2w`#JT=^EaeFj`!)C!M*aW*r6D{M5_u;Fq^udzIiC{QJA^}MVk4ufL zF*XU-exL}OC3K9EAuz}!HLTv_QmLKewFTEkL2aYm;t6hvtalpi!Gx34*h-hfIo)Nn z3GU!gv9kmrL?f3P(_pa)bBuO}+xnZNbY!BmIwW;KrPpP|C86!XfFo;RZ5?J`1Mn+) zs>6m%SLK+*=O}Syd?O`3B~=Q|Mz7VwR<@zTDfs$mNm_c^ndD3^U6YUoYK)?$;j~1% zP`fZQx^!WXAr3appmE?7KsGr}`+!E1qJ27Ea|wqKNtz}$%}-h>=n8E`=B1?zskLS{ zn}LX>Af84Ln+Irg6ht@$p(fH4RGqdivxhHTD4Tbmp;y8@tkGz-2M1^-D_|-VfP$*j zR%V_;P?9U~IRz!C;|S`S1R2z*$k?YpwTky?#KYxFCgrHoi}W-05a_3fw2FCu%AvmPo7BD zMTUPMDuDK{5&mf({9-OLF@)TR28qHlYt5NEqBA!{rz4xkf<~jB0k;#WuB;mjP!A;e zQLVIF0E1{zu)mHjEdd|uTc^!9^83J%KBFd&d^9Ft#-C}RaEzTcjZko{CY|dR!U7OY9wOGv= za$Ppq|8$N!kAH3tgtoP?n|*56d$0%LNIW>Y3;Fy(`4b9s`6c6Z`4ftC`S}xz@{01J z4F)^7QiEZj!GOd&%mxp5BIUIg{IkK32<1&`o1SR4jqgToFS8WP-QvAaD+yGhUnHzAGbQ0)!?=LC8!7;nXx9CnY5U(3GEhK8AGel-5OzXc^+3;TxiryNqJ09dqhYZGw?}Y6fsq)F z1jXQo8&*{a13F@4ffyi{a?L_}s^3+v1S&WG!VnEj#l=lcCX*?_*AC3{4EDd&OMv2) zt1tq{^9apet6T7>unGx|`YF~xLm!SSJ5_hn69W&pU4uwsSYr6Vh@{B$sKn@jG0Cy1 zaf9Qz0m%tN2JtD$nxWdX3(|+B@%yiN`JTIZcI<>Jw?RQ-IRY1SAo9;~<5>Thc1F0WwWRdS9 zyw6@V5WfHax?lX@J;1X;yle^o9?l0E!!yb%X4`I#;Db!!4qB(wM}y%&1VUzp@pW2e z3l z0iP#Y7ejiL20p(#hWO+`rejA82`L!oa$Eaih%7j25ywk0gbI_4>cPxgW zkcIT2r(%eo^o3feb+M!mQW+9WckCzKU!e<1fj633e2tL2XU-4K*I5-u^V}fT=w9GO z1oj;}v`!5`ayJe_*a7U}zglQ~Xk`))WG@!dt&X}Oq)gyD(9lcw4d zFPhR`T3&1`Nf|k8Siy+YoTTB2=_wfrL*g>0G-`NGSx%0=yl~pEkr~jTKWb>skU<$~nQ0kiuF3Ay!pyS5(lOJf%$ig^ zp{lHEVt)DZnKSdp=$d2Wu~?E-Q;2K6GGCe13fP1<6T~ayUk#F&9K_5LD}098ROF zP49KP^j3$-Xw|nk^lq0)FShF;JT*Zlsk^DwXm<&w_Rs^chsuBgm0;iOZnJpwqET?` 
zvT`bnZi}g&#lP9QaqCJqYtPfc zUv5EOL1BJDL4IC-@r3*$2p`86PRPq|YA!OD$k_Ewh+7K26%%kns>$R5Y#~|1dccrS zbm-gK^)|r<5j*AiyihucF;l`r4ykT@L~b*&A&A(2kR>Zw<%kPTn!db zzyA7WGX-RjXbZ#^2(L~sn*AWaVoa~h)?s}gaDk^@2{VGZ!8rH~Jro)lac;#3*l*fH z;lcq3x5d%qZMHZ7KGGz(LiMM;^sFz;+CisK?x_)sZZ)ns^w34_H{ENswm2ZU(4up= zTJ$Rsv%b~iaTe%YfR*OVt0?-Aav3lh%~C;$E{{RcH)GZv@P$AEOveu59Y;b^hq z5SgkLm(v7$S5bK2IK=LDI$X$kO6gRp?AerH!4;ZLHLzvv`MMIwaHxQ16Hu6vbI^rU zx;yM9AZw}-v;t0UK(8%Et7R3=uQ%0Jg&a(pgh3CUjKW7v77e;{Dkz_=-CE9wuU=a*xx%;bJ@Hkdl3vOn{aF-z0yF90|X5R5iv#MBXk zSVFW0B8{qSYM5c1VuoAM_l$@*5oX!W$}SY!hx87`)_3gCFpPj4AGJp&gKD=djL%WNIw30)oqW~ydI z=ZT*oMn4!DIcW79hrLelxH_03F-}vweTukeL(;tk<>`Mm*tRjWFfqxSqG)5n-|azx z3FXP9-g;Tt00eXFz90Z@C$I+A)e1tJG&m3yXq~wR3@eQ!oEE3R9HmN(16vMg29&gG zEtHkI9*7g$Dmwh4WMfB@vB_f-^sbgBu;Dfx_1U0bp`DBGj3jWdZ+nwJE^k6!Nq%ub zUQuzb$uz#Oxv;6wBot3zt_|U%421@ztH#&?`-NiC3UoTiB}LSsjD@mtFd!DFx`QHM zIkeo%aSMeI5DzgpNUtxx@yAq8F^O1ET+Z2(jtJdT-&$bO!Mn<6H^T{sj&?_Pd7e`M zP!yye&x4P+aR17zu+`C|V@eBwR)D#}-MC`uLW9wREq1^XbO7(;8EVCxaY8fgS*6WS5#?ba5)v*PUt*a-Uw&wJT6NMSRW)VaMmh& zdo_S4JH@~=dO`zKOPd?J!y|ayR)|x8#i4Q=xGOU>dp!bfR2*)-+tLDiFCF`WW3JxP z9MI`9ui!;v;2VkISui@eT||KAc!*J=6-w;vOuQ9p0q7~aX($+$I7SaX*@3~MbnT?q z;*}vZMF0L&sq0uKNJJhQB}JiUhyoE)Kr^Iy_#?^65kY6`Xyn=;3#SMS5=H@97qnuM zu(1&`5&qR+#Nf=DJ{b4j5yZazZ5JIPr6nAR*v0Y2Fjm%L8Af?Pq!;&NxXgGyjiUROFo&Vc~0sl zvT|x1E#itnNwHLcZ0NhwzyyK+%;D|}IO{i>@Ma1y?ZDja*|I!Q z-+}^D97u(V5$_c^TbMivhtC;_7DXqepYzYjFQe0<7w;euEdkt9L30dvlSm+;ePWja zqzu|mY$a9jUpZ0N!M8YELOpdck~5|8l)|3)NfGnABJpUNz);f|At*vnVY5U^pS6-< z^Ko2-!A!W-$WrO7$^&o()bM3P+zTmW8pi472rF0#KVgyBrb&11B?h&H1RgL|+M@v^ zgjNl9P6_K*;};MMK?9?j+TKtBEB28>W87`fES_88sW5_)wi1{YYk`QsZpNArov=wg z>ZO5aRt{`NFy3VwDLFGp)5;rxYdKJD`LTkV?DSF1Hi223XNs5O;PJ{`zij67=nG-_I zlmo4hYtnm4RYa?5d_!9)YFG+Y=HhRw6lPxOIWV?wap=&0nFPDr>(*J|q!doU-1>G4 z7@n-06%JP$&WvV(LeI<}1l{}=LFDKIrDuTDMc$uQuN$&AbR^)f6ENqG6o9)tZ!hNq z3HwO~urHKIH_iba)#819KaUOxAOwl*Ho<;UVYS#;nw)OR0(7ZQ{$4_?7eV!t0JqS> z0&F~-n{M^GTLp8VRP>Jy5n?~-@GVCz(14x>&a)u_Oz*oDFVSr_+KsLbF)~@A)9IG= z9&D%s!U!(E1U3%RrcPBy1B4h8(OEfC_JXFO&^I|il5ulF3B!amO>QWf6|Ho?a!%C- zJ74SBsGW<6}Xk`FM4>)qfPKI^!;Nc8yW1`)F=#?vlDc-7Vhe4V5biXyuI%$ zXU(q`vlbxcA%k0$wethpmlUc0wz91qp2mTvh0v=5W`8F8!4BFA;4fK>2nMgQvX=P@xe`-WeGpew6lLuNxsA8vFAXUv4Bu{oxzZz{yg!hZ| zdPa!}HcYCEb>ewhNDi|DO4L<(PtZrT^y5iiERl#xJh=>Uz;tdmP9vK_utbV`&b@Yw z5T!b|U^AL7^IBX&esCgC>;$=1n-Lb7-a{zX8S3SM;;n@XAmo?<`BNwC>Yx}54g2Xa zFu^Qqz}bhyApa=^hkoh-oXU#z84${0-^bfAX-L&0@5Wr z&jpi-Kf=LaxjWo+8&S``gF%TM^x`*U5bgBXQyK^%g~}bm(x_E_!wg27xul3LD#MDI z?Q^3PbJ29KE#EIqk62)>rd7{rsH>h!p)^6%=UW4t-J^H4>zBYo#)8LVT(+d$>~`R9 zAm9%ZjD>eByzwc)BjS7E@kbxn1HU&Eb}a1EFnk2?yYJwmmlyVB%~(nth(G+WHlRQ4 zbN1P1H~(;UO+L1j?RxU;8a`vo@qe9#r?&nrUu;fieRIm&i*wrE^>06?XOjovifqS~ z{Ka0|0aCg6-VaYd#eI|(xg2=7osT3uye@wCy`(2M9$Wp|W67&eXx8?8M1}9yn(f{D1;kBl7?V2?mo$I~Vb*{zY8mPeU&W_G&Iy*bB>g>D{ z+SjdF+`0X#&c+8jJsW5Z{h)n2uk2jjjl(X#dQJ3q&aO_|#;Wz59aFzSSmpP1H?P`) zrH*xD-&)m~xor{!`;X4Gt(~2>bN^hkI}Cr>V{o47vF6!t%!|8ouko=>1BMQ1tMUqW z*mAdwYCBp|_x^?fL+S6rZRbx%oXm_LFknFB>CN@8b=|+UW&eXWop|uEZw}t@!e`se z6Yl-^wVt;Zbw4xnFGpT{q&8=hEurkLjz@Q1-&47~{0;r&TfRFy&ysZYRXwGXUOx8i z_Eksn-=25d+pB9X{9woW=s}%3nr~Qd`gZj0Bi)}bvVC5??fRJCjv6jxOnUx_4dr)d zmlWSWXy+YgG(}fky7OSkgE>Os;bCuIl{{(mZgOX4?zHZaZ!eADKL32s^}~s9XLqvU zg#~|T%`U(HjdfcKy0^p+4LdktRKcD5Zho%e&b>7^-!Lwz=Z`5{hrRP)&(!DR;(!Gc zhwdKp!u)YZ_I!5ZjQ5w_zWbmbhof9;FA)-=Ap=;68{jlA=yFcu%A4(m|P)qGyex$4I zk@pVmvh3b_?2G!1d-R{Y8k;h5%iJMjXRj~XTdAG0;m^w#9*Q3~sd)2|H^XKSqb=hX+|Ce6$p9Z~qvk^N&g9on_Hsn8t1 zm`l`6i~4HS{1ap9ygK*edkKhi0U6lV=&b#ws z!lTA4%*ZXPe)!eLpNwDdLC>-q+g!KhUGeVk=WD-@U2^83%IZDW3dh@%8g7`g`nN@2 
[base85-encoded binary data omitted]

literal 0
HcmV?d00001

diff --git a/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQRTMSCY4VDYBP5N4N5JK/block.bin b/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQRTMSCY4VDYBP5N4N5JK/block.bin
new file mode 100644
index 0000000000000000000000000000000000000000..429f6118eb36e694437b626f9efb45113fb2e043
GIT binary patch
literal 21765
[base85-encoded binary data omitted]

literal 0
HcmV?d00001

diff --git a/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQTJ3PGF7KB39ARR1BX3Y/block.bin b/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQTJ3PGF7KB39ARR1BX3Y/block.bin
new file mode 100644
index 0000000000000000000000000000000000000000..99cabde890630025220aaf3ea2782cae3d1a1dff
GIT binary patch
literal 28169
[base85-encoded binary data omitted]

literal 0
HcmV?d00001

diff --git a/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQX8DYHSEBK7BAQSCJBMG/block.bin b/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQX8DYHSEBK7BAQSCJBMG/block.bin
new file mode 100644
index 0000000000000000000000000000000000000000..b5dec6d9474300cfbccc64e5259eb166b0a41e07
GIT binary patch
literal 27431
[base85-encoded binary data omitted]

literal 0
HcmV?d00001

diff --git a/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQYQVZTPZMMJKE7F2XC47/block.bin b/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQYQVZTPZMMJKE7F2XC47/block.bin
new file mode 100644
index 0000000000000000000000000000000000000000..d6ee39321633f07c863c1ca933c2151304857c2a
GIT binary patch
literal 36655
[base85-encoded binary data omitted]

literal 0
HcmV?d00001

diff --git a/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQZPARDJQ779S1JMV0XQA/block.bin b/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQZPARDJQ779S1JMV0XQA/block.bin
new file mode 100644
index 0000000000000000000000000000000000000000..d10d1da8d32b4018c2ec74a7e0dfd1e99d0834ca
GIT binary patch
literal 24273
[base85-encoded binary data omitted]

literal 0
HcmV?d00001

diff --git a/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJR0R3NQS23SDADNA6XHCM/block.bin b/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJR0R3NQS23SDADNA6XHCM/block.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2a53f1519cf07afe145b7d5a25cf54031773d93b
GIT binary patch
literal 77958
[base85-encoded binary data omitted]
ze2*=1(1sXpzyoOgR{%2o!}PER*}luH zZ$@o@*CuDJ-^g9-4PG<6LIuN1{K`;)|1iA3a~aGiQHN~RcU8m5V_kDHqdBB)LE*H% zM7Dk|JuM=c-G0J1Jjz1CL6Rx*$!R~A_7U@5_!4}`?W5L7CUNJY8^M*M7r}*3Zx2GI z^b94Pr$kq*8Be-I6Dc428TKKMAs0t%Q3@U2pgFxLBN?lr-_CLAETNL)4B<4#DZ)vP z6NKX&-x0ng*;{A&;KU_If&+&=LBTPx2@?o%F3AYvc`{^R6GM9aGxqya)G<8yk`>DN zlA#nX8Nv+*69#cu5e9M$AoSs#j%sHgW4jS z^5}!x0eZimY)D>6)>IWjvLoB6{Wfm2m9T|lGa-xL;+YhW)g(_sJYf~DUP<6T*LYeT zJHbvK$9D|h;+U9lM;mikxrr{&7CKK-1r$#IxRYlSym<3jgqa*O2%bEUVU$Ckl>7LD zpoB_El0BL3eRBJQepgBa^00~^L9&44U1$XrZzNen8es!(n@U*Ev5t^J%IkEDYN&$C zM+xPmtfJ9$nMRTF493uTeqmja*-2YN^6uJdf{NQE6V~#sljx?_W4f*N2(=8q;331$ zc))NU_ZWV{kJ65~(-pTFZlT5%)%CC-`R_0%o-s1?B<`3YlQI5>t>T-K|J|av`ehHJ zD<>IFxRI>!soN=@H`S*LP1Nc5wT?yeeUO4iy={}#pXJ9#J@3Ta29N^4fEs0zHT;HBQG5_^RXqL6= ebP@OC+o*3L8ZmawD?j7kW8Ri0)<0M)Px&v;_s^vO literal 0 HcmV?d00001 diff --git a/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJR31PT3X4NDJC4Q2BHWQ1/block.bin b/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJR31PT3X4NDJC4Q2BHWQ1/block.bin new file mode 100644 index 0000000000000000000000000000000000000000..f4ba460cc4c5c589ce2a60f4d08d9a6f563fad6d GIT binary patch literal 19471 zcmd^H34Bvk)_=KONK0F~B_yRl3$(UKlkOBKSSYP51u6vr0n2Ohn#Lx1F)!)DFvy33 zh=}0OCM_}oBg&#Eia3>`jDjLMii!&2Hm)Or4h$|Md^qRcx8!B%0_yKK^L+^~?{4RR z&pr3tbI!f*mCErmb5!A~WeWW7;Fb?RR8**xDvDf`ii)SGEq`g!ytHfj{q8ZFJ`Gzt zy5&mqfIb4gZBL)L7%E~%LJt^wnIbfl3JakqYM26kgbz@x3B~_bKC^6XL@=7xd!E?$ z{plSqC|ktD7@(b6TKnX#s?r5Xxf>U5O8Vr@btli9J{~Ir7CZPqa0eNEp}_xk{`tFC z`vsHHg*z07pWG9vg~680s{kony7!6D-)`i<&~63(cleVZP60!Uh!RDrLRE<{(S0so z{5Dn*axqr1NCA_cowx@nl@xXOfxR0a-nMPi9|uUO#HV*}*|@#^Pn({;zx_|!Lws@p z+(V}uLh!$bUw+|qbWfc=qWFCE8w$0WqN<0y_sA-B-qw?=>h=t2e`H;2*5*f7*F8Gq zrtNpvEt!1u+Zz|gZkQVPWyasmrdHk>GWKl)2yVou*T0*Zv};wkackR>y3dCsnO28y z+SvUR(7}#zR}) zHqYDG_SoveAzQ9cDPf$$;d|%5eqI@cN4W^rCl+lo7VWIrWwU3`0_m2`7Q{l zc+pqjAHrXNv3;P3;A4WcWgWsE9Dweh7N&-cY@j=y;5*nB9l0qJd%5><)n zZ!c`P7X;{Q;NqQ!PhGTq(!Z-a$Z{flA8>EGAQ>=9P5iue`vL`d)9C@9*wAXvZOd}TrDb=EGbrZGwxpYEh-Iwi%9`I zS*=ucDnKD9ZL<21LS+<^+mY%>sF40gxN0jB()UHEPK(ViMXHX42<5NFsA`1d?tW5o zU96Nm?@fLar#LgcMkUc%edE zL}?PLoV20d>7WfvCGBvUoo3dSmJF>*)2E8I+ARzRU4>T^ z18hO4Ce&i)oGL`&YR1A?8Jkn3l=_SE_BVN=7fWO(jPGL@W*`pZQJU}yx`we_K^GxY zndUSJfk#G~kSeDmNa=K!HL$b~l+L!9oq;vsRd(7I$T-qOPIR#LatCc?xIn7NsSMLa zC8dd&WwtWYX&cK~*2E=aA&RquG|>}X4jNAhsSgAWuY|c}jou27m$H<#Vxb3>m?-By z!pwLnMLESyWNK-b#mUd?tY$moZKGCdleL%P)2U=tj24y?Ry7r+R;pn=bt=-8#d8D9 zaiD>QWe3gyw3DLLkEf{A>O=iiH&YlBX;qP0AAKd@Mzt~hkfL}lG1?kF(jyp=1mi`F zu~nne5sXlR5lpKI7Oe0iq3PRE@m$Hydo4cC8ISsYib|tCrcsTPp~_YP0Rt1U6{Y_p z9Kb}cpizphLJoETgRRv{U}@oz+DYQtA?rn)51-`$oh(v=?K!PTzb^th5q*nBDLNc^ zCu>rnlUIAw3BSk)I#{z~A?cSQ0bO)W8l~v3!H#zhmq}SBotg z843TqKmKKmzdTvuy?_dj?Luz^yF{>TYFqj@ap}k7lChXaz{19h4Wu1Q*2{(=fO^o2 zij`4rz5qk1E?1YQKObLQ2;SB^PdhQ?Wq(SqZIdWhCHPSu#FU)_rIBFU;kA2&u1VdL z{#pX)p%^uI;|HDx-eSCik$&v>cgF*HV_U$^*`bwiY zqK46kv5YsWk&k9KhCGiU)t&vpSVv*g+@I1$X`Cjrv(8ncGq6^@$wAlBHd=3QcCeg* zwKMvZ2FAgGmq}ro?BGza_sFs_jajtAP-k{B2B*uxWC5w2HehFzWw+2ao7rZ{D##gW ztQl2XYs{K6xiX{1Wwsboq_RwK70DEJ75}JH5D8bp0nRf!-=)A$pl0xiY4JIua&S{a))>!QX=Oxd(z@=AL$3i_H zG6@Z!fMXmDW&R=Foy>dP5X9ldCm^3wU`Cck!rcc356`;{qF!|r{? 
zuS^_&B9vo!Wx}O{D-W(B=kl8`!Tqj|0n>dB!T@Uq9 z_&eAW@*>_}p6R0qaI?#lxJ+(gtE)FFF*5IL_mVVEe_W~P&as|RD!TEl%OVtAxYrF4 z3a@Y79-)vNYiERlxTs^$K=M+bMJTYN`Y}Qg=&RO5Dtw&9=16Aw`#(`sNHvH{3;MP< z5=k->ob$0rsAOQ8o2Q(@6zeWUx1 zE6cxrRO+>wf!CyE49LhGoSK}Hl9@PY*r4IX1?AH%h10VtO2(JX(~r+9OHubrO;nZZ z)fwu7aYbWBj~<~)Nlw$GB_*Y3l80P-UD~+x>>0M~oN-a9v8sf$k$D+;`3e073{(R_ zcI}w5+QM-K5#>=aF=^Suu8SY4O3NQrc755r2_@rajqMi`Uv~3#8D)cpr>X`IDjP9t zz;IR3q6uT>T|c2@+|>D#N~(&>s5R45GZUZH&*#maPHVwh@Qu zW&`W6>tXi*`yG7DtIHTZP0D2I9CTw9#N^XBQzaZKGfw?%Sks)IwlMXa-o)zd^(MWQ zHR^jnEosQs73gyGu%ptM*d7}Dv0!33&TRjM9m5{Y$y{;QPKViKVjSfzn*nbD9Sq0m zYw_*|SQHCbN2bmU+fcoWbLe49Nn5a7IEO(mw%|$hMusOfvAz|$=}hAs2W-k6Qn_id z*$!RsrI|W%?*<ww)QZ1?qcydwq-VPiZ2Z{!(ZmF(yPv3$t%K;&dWT0W&Z z0gC6UM+nltKSUruuxkeWs$dOVIqM)#68t+g+nl9%&s@oG@qMcsvEXL%WA3`a^{j(+ zIq~+}PdWmLAmUlD$9L-MoKCwSzB<}wv@i}`6&xlolV;7T3>>vDg+OQW2O^JZ#HPAQ zJSnyYb)|!Q_(dlYoj5)CI{W`?UL(Xo%`IQ$4n->>6kn&x;V%J8l z9+sZfY=n7g#NB2Lp9}{w4i`T0!qZCIb>0F#&-0Y)&r=Xn=FcnSq4s#8d>nzS`v_J}0C|>=#X_7vXHHsCcz@LjC z>e4MZKQY;2A9`s~n^-t|HR)J~N#BHP3D>#$x?F<}F2PpxV(t3DV0Fzcgk_6IMBoTy zFBUMF4)5}X6I%bV_kIQ(jDZE))eN&B4o%08JYZl_J@&0OTPEL*1UfpCgKey`zK7++aVkE;0smD-8bm^6JvxTLNVpeZi z+^B=|_^y1Zyq%u-IBfpqr8+5UkS(uZ3={JrMt4}KB$~T2BF1K5jm&tPQHTYvjGFhN zm9XTlhOQKzeS^QeGE(RZPKx=E^~xyW2>^t-6%YwCHveb=hcxhBh7?ONRqz=i|_qkKz!jqqwnJ@hUS&m<@i8;X?yHAc3qi7`MF(Iqeg^}#TW>@?bG@G>g) zMM~j0FU(Yzm;xKVCd((X!C@Wa1L<$7f-LJGp-s;{kr*mdN7%l~J}|E!BAGtQIDt77 z!!*)|yp<3g1vp?6<0LfG;VDdS+Rwu81UY!mYNhK*L^}hXtO@Uq{f2K`6aubAv<1MD-^Qz2?8NVV?Lx^kj+ETnV_Zs?GZ`AT&w+C!Z>M*VMxf8~QtYUCYMH2fHWqr$Y_ic74orX$ z6lLlvU}Ml5Gb1+*z1g4$T}fT~nxk&EVZdA^+1O?3X1Z)di}U;$>{VFEZx5wiBI^$l z%wS*XD1|L!ueOfD?h-5jyg>nnJ^X3TG*aBlI@)ODeKGN1jH9Ram9(?&iie5ygSMOT<&8IBb5TiiTq$dB4qT)BNcGUh4sEMusbEva=3A z>aq6#CBzmp5Qb$<7971udsT2QP8Mu19CSc8V88S@Hin)JAOOMAXX;!wEA8OwXiK@v zV)37AX{mP!jyKJD`LF>E=md^aIYYIDUW~VvrY->j34yzEVAPMbt3trN&!iji%D-xQ zOgsZ#ANtj0bG2f@gThRSmcz*6oD|0JS;2Trb8&DsOUy}+dWPT2F@pw0^~wn2g}O%mNoJHEzuzQ!SGg}0IiSjiNf)W5gzsF@jqtpw zk;~$Y^;z&_1!rL<>@1iB=p6LMUu&cez4PshfvrkGwk|K%7Ysk4!SBgq7h!NT+nubw zsUUk498sE^z$Hsc8nCzaZ_HB!0m0n%#!|q^F^&eMg9d#3)#WI=w0(ib6XSxW;Y~LO z??G~OEmLh`b8>UA#Jj}t+E9PK#*>FP*tC-^XfF^XAHhHO5lMfI!HLkkXCMZpccT&s zLE>dLmzDgz$}e*d80MEDNDfpq$Gw>z*+3-vax-Hwa=oE8!;70B0)cX}t(MIMt*>XM z+So=L4};GKevPTZUBNSFvKy>+z;jV}2e+3h*cF#Ec@)gyRgV_y%&+B(mLBAXN=^J z0OlU_K#!N{=-;msNR3h)TH@VmE3a2*$}xnDf7 z{c-r~=8y~M2ZUelJa7Phy7}O?d^JwR_QNl>F7sGkLe!ul;{RmLJp4-Z90#i^r6b_Fq4y=i>1Tx721llfT+$86d3@t_^Vi>ZK31 z=7J2DDPq2b|B^weeCC-B_`)y#2x-Vb3H~KtQiU^OJxTL2U|Dyjti!T5wm=7f0W&uh_llBZu~t@213Oujptx`({hucl9$K zah!dx<&_1xEH(G}`A>4@?7q38#@kkoaB4#=;Y<|VboWKn3(98 z{+c&iw$Ed}Y`yi1UH2?Je_~DRJp*rkXm0iQmo}MC-n;TeI5Inxj2@7L@w6ln`@yhj1LN&Ox!Y#2dN(Rf^dKSw4#sR1Ryb?{Lr z6bQb-KOq6dfhD*}2rd??cq|DKi7Dw03EUJ{02|v8`eG#htfro%v(VvEfgTjb6C%!P|6>@YP2@a=YJH-??nn^JYpG%Vb%~m=(m&iVW^c;8@&Eio z>s!~ZAGiI@^9kvQTK{oqY;0^??T$xRPTz5O<;!jQ*4T{$TQb_dZaw8Ga^LE?#r=BQ z?^@S-R=Ay>h3*yYueS_r|Gp)y{hOALJpJ6?dWN~b@?7Ko*puL1*8XnG`|TrJ=6E{Y zhdeXf&w0kX&w29Q7uMPwPp10hEo)6rtXQR8!^RBza zv()`yTU~3jXOmmq_H*k+kH)R@eC|&2{L8)4bC3H@xNh(~?|#-j+cU;<(tUA{X6ea2 z=}VvR)Bx5I{auFD9UB%;j!rnet)Sv#cU|_jMN^*3zdhp8%%9Sf6*fZ!rmtqqBOPp>$3JNWvP&KkrAqwS03YDS> z6iP+|D3lakPsP)yjzT6>OQBov57V0;Gj*MIqJ~0Is6U0&D2hTV{F8`(5>Pya;!rHE z?MI;)6pd4pDKyC(O4-6u8ilS!!zmPYtDS0Kn=7cLQ=6AiOMizJ!ygx-CJHs81`4^* z%@kUQ7Es8EI0`w?t(4mmdL7k14y90NEE+_ia#ThYgp5YiE?Y=3wQmd>O`%aY^rdc$ zL=hBHqCT*oQ3-{HqcG}~VaJA2Xb4KBPzoAMp+P8vLf4>lEQn01CBHq7Lb)i1LfJ@9 zAsx!1&kov(GdOh_MPURI9jigW^Du_i>A{2(W7)4E-qEew&%s|sAG!0cz zXg<1$Li5mE3e7*ORrSQ*oLvzZUcqe9S!yN_s|+G zWPmrQPm%MOwiD_f)x0wQHK<=k>S59$E&B`|-7$a9-=P3vN1%_wKNNNJROueyF^@Qi 
L%b!^M`q_U2y?XL& literal 0 HcmV?d00001 diff --git a/pkg/experiment/querybackend/block/writer.go b/pkg/experiment/querybackend/block/writer.go new file mode 100644 index 0000000000..dcaf5e309b --- /dev/null +++ b/pkg/experiment/querybackend/block/writer.go @@ -0,0 +1,107 @@ +package block + +import ( + "context" + "io" + "os" + "path/filepath" + "strconv" + + "github.com/grafana/pyroscope/pkg/objstore" + "github.com/grafana/pyroscope/pkg/util/bufferpool" +) + +// TODO(kolesnikovae): +// - Avoid staging files where possible. +// - If stage files are required, at least avoid +// recreating them for each tenant service. +// - objstore.Bucket should provide object writer. + +type Writer struct { + storage objstore.Bucket + path string + local string + off uint64 + w *os.File + + tmp string + n int + cur string + + buf *bufferpool.Buffer +} + +func NewBlockWriter(storage objstore.Bucket, path string, tmp string) *Writer { + b := &Writer{ + storage: storage, + path: path, + tmp: tmp, + local: filepath.Join(tmp, FileNameDataObject), + buf: bufferpool.GetBuffer(compactionCopyBufferSize), + } + return b +} + +// Dir returns path to the new temp directory. +func (b *Writer) Dir() string { + b.n++ + b.cur = filepath.Join(b.tmp, strconv.Itoa(b.n)) + return b.cur +} + +// ReadFromFiles located in the directory Dir. +func (b *Writer) ReadFromFiles(files ...string) (toc []uint64, err error) { + toc = make([]uint64, len(files)) + for i := range files { + toc[i] = b.off + if err = b.ReadFromFile(files[i]); err != nil { + break + } + } + return toc, err +} + +// ReadFromFile located in the directory Dir. +func (b *Writer) ReadFromFile(file string) (err error) { + if b.w == nil { + if b.w, err = os.Create(b.local); err != nil { + return err + } + } + f, err := os.Open(filepath.Join(b.cur, file)) + if err != nil { + return err + } + defer func() { + _ = f.Close() + }() + b.buf.B = b.buf.B[:cap(b.buf.B)] + n, err := io.CopyBuffer(b.w, f, b.buf.B) + b.off += uint64(n) + return err +} + +func (b *Writer) Offset() uint64 { return b.off } + +func (b *Writer) Flush(ctx context.Context) error { + if err := b.w.Close(); err != nil { + return err + } + b.w = nil + f, err := os.Open(b.local) + if err != nil { + return err + } + defer func() { + _ = f.Close() + }() + return b.storage.Upload(ctx, b.path, f) +} + +func (b *Writer) Close() error { + bufferpool.Put(b.buf) + if b.w != nil { + return b.w.Close() + } + return nil +} diff --git a/pkg/experiment/querybackend/block_reader.go b/pkg/experiment/querybackend/block_reader.go new file mode 100644 index 0000000000..10500d4e90 --- /dev/null +++ b/pkg/experiment/querybackend/block_reader.go @@ -0,0 +1,125 @@ +package querybackend + +import ( + "context" + "fmt" + + "github.com/go-kit/log" + "github.com/opentracing/opentracing-go" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql/parser" + "golang.org/x/sync/errgroup" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" + "github.com/grafana/pyroscope/pkg/experiment/querybackend/block" + "github.com/grafana/pyroscope/pkg/objstore" + "github.com/grafana/pyroscope/pkg/util" +) + +// Block reader reads objects from the object storage. Each block is currently +// represented by a single object. +// +// An object consists of a set of "tenant services" – regions within the block +// that include data of a specific tenant service. 
Each such tenant service +// consists of 3 sections: profile table, TSDB, and symbol database. +// +// A single Invoke request typically spans multiple blocks (objects). +// Querying an object involves processing multiple tenant services in parallel. +// Multiple parallel queries can be executed on the same tenant service. +// +// Thus, queries share the same "execution context": the object and a tenant +// service: +// +// object-a service-a query-a +// query-b +// service-b query-a +// query-b +// object-b service-a query-a +// query-b +// service-b query-a +// query-b +// + +type BlockReader struct { + log log.Logger + storage objstore.Bucket + + // TODO: + // - Use a worker pool instead of the errgroup. + // - Reusable query context. + // - Query pipelining: currently, queries share the same context, + // and reuse resources, but the data is processed independently. + // Instead, they should share the processing pipeline, if possible. +} + +func NewBlockReader(logger log.Logger, storage objstore.Bucket) *BlockReader { + return &BlockReader{ + log: logger, + storage: storage, + } +} + +func (b *BlockReader) Invoke( + ctx context.Context, + req *querybackendv1.InvokeRequest, +) (*querybackendv1.InvokeResponse, error) { + span, ctx := opentracing.StartSpanFromContext(ctx, "BlockReader.Invoke") + defer span.Finish() + vr, err := validateRequest(req) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "request validation failed: %v", err) + } + g, ctx := errgroup.WithContext(ctx) + m := newAggregator(req) + for _, md := range req.QueryPlan.Blocks { + obj := block.NewObject(b.storage, md) + for _, meta := range md.TenantServices { + c := newQueryContext(ctx, b.log, meta, vr, obj) + for _, query := range req.Query { + q := query + g.Go(util.RecoverPanic(func() error { + r, err := executeQuery(c, q) + if err != nil { + return err + } + return m.aggregateReport(r) + })) + } + } + } + if err = g.Wait(); err != nil { + return nil, err + } + return m.response() +} + +type request struct { + src *querybackendv1.InvokeRequest + matchers []*labels.Matcher + startTime int64 // Unix nano. + endTime int64 // Unix nano. +} + +func validateRequest(req *querybackendv1.InvokeRequest) (*request, error) { + if len(req.Query) == 0 { + return nil, fmt.Errorf("no queries provided") + } + if req.QueryPlan == nil || len(req.QueryPlan.Blocks) == 0 { + return nil, fmt.Errorf("no blocks planned") + } + matchers, err := parser.ParseMetricSelector(req.LabelSelector) + if err != nil { + return nil, fmt.Errorf("label selection is invalid: %w", err) + } + // TODO: Validate the rest, just in case. 
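+    // Note: StartTime and EndTime arrive as millisecond timestamps (model.Time);
+    // they are converted to Unix nanoseconds here to match the TimeNanos column
+    // of the profile table.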
+ r := request{ + src: req, + matchers: matchers, + startTime: model.Time(req.StartTime).UnixNano(), + endTime: model.Time(req.EndTime).UnixNano(), + } + return &r, nil +} diff --git a/pkg/experiment/querybackend/client/client.go b/pkg/experiment/querybackend/client/client.go new file mode 100644 index 0000000000..3ac72eee5a --- /dev/null +++ b/pkg/experiment/querybackend/client/client.go @@ -0,0 +1,73 @@ +package querybackendclient + +import ( + "context" + + "github.com/grafana/dskit/grpcclient" + "github.com/grafana/dskit/services" + "github.com/opentracing-contrib/go-grpc" + "github.com/opentracing/opentracing-go" + "google.golang.org/grpc" + + querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" +) + +type Client struct { + service services.Service + grpcClient querybackendv1.QueryBackendServiceClient +} + +func New(address string, grpcClientConfig grpcclient.Config) (*Client, error) { + conn, err := dial(address, grpcClientConfig) + if err != nil { + return nil, err + } + var c Client + c.grpcClient = querybackendv1.NewQueryBackendServiceClient(conn) + c.service = services.NewIdleService(c.starting, c.stopping) + return &c, nil +} + +func dial(address string, grpcClientConfig grpcclient.Config) (*grpc.ClientConn, error) { + if err := grpcClientConfig.Validate(); err != nil { + return nil, err + } + grpcClientConfig.BackoffOnRatelimits = false + grpcClientConfig.ConnectTimeout = 0 + options, err := grpcClientConfig.DialOption(nil, nil) + if err != nil { + return nil, err + } + // TODO: https://github.com/grpc/grpc-proto/blob/master/grpc/service_config/service_config.proto + options = append(options, + grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer())), + grpc.WithDefaultServiceConfig(grpcServiceConfig), + ) + return grpc.Dial(address, options...) +} + +func (b *Client) Service() services.Service { return b.service } +func (b *Client) starting(context.Context) error { return nil } +func (b *Client) stopping(error) error { return nil } + +func (b *Client) Invoke(ctx context.Context, req *querybackendv1.InvokeRequest) (*querybackendv1.InvokeResponse, error) { + return b.grpcClient.Invoke(ctx, req) +} + +const grpcServiceConfig = `{ + "loadBalancingPolicy":"round_robin", + "methodConfig": [{ + "name": [{"service": ""}], + "waitForReady": true, + "retryPolicy": { + "MaxAttempts": 500, + "InitialBackoff": ".01s", + "MaxBackoff": ".5s", + "BackoffMultiplier": 1.0, + "RetryableStatusCodes": [ + "UNAVAILABLE", + "RESOURCE_EXHAUSTED" + ] + } + }] +}` diff --git a/pkg/experiment/querybackend/query.go b/pkg/experiment/querybackend/query.go new file mode 100644 index 0000000000..8ac4d02b1c --- /dev/null +++ b/pkg/experiment/querybackend/query.go @@ -0,0 +1,141 @@ +package querybackend + +import ( + "context" + "fmt" + "sync" + + "github.com/go-kit/log" + "github.com/iancoleman/strcase" + "github.com/opentracing/opentracing-go" + + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" + "github.com/grafana/pyroscope/pkg/experiment/querybackend/block" +) + +// TODO(kolesnikovae): We have a procedural definition of our queries, +// thus we have handlers. Instead, in order to enable pipelining and +// reduce the boilerplate, we should define query execution plans. 
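+// Each query implementation registers itself with the registry below from its
+// init function, for example (see query_label_names.go):
+//
+//    registerQueryType(
+//        querybackendv1.QueryType_QUERY_LABEL_NAMES,   // query type handled
+//        querybackendv1.ReportType_REPORT_LABEL_NAMES, // report type produced
+//        queryLabelNames,        // queryHandler run against a tenant service
+//        newLabelNameAggregator, // aggregatorProvider merging partial reports
+//        block.SectionTSDB,      // block section(s) the handler needs opened
+//    )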
+ +var ( + handlerMutex = new(sync.RWMutex) + queryHandlers = map[querybackendv1.QueryType]queryHandler{} +) + +type queryHandler func(*queryContext, *querybackendv1.Query) (*querybackendv1.Report, error) + +func registerQueryHandler(t querybackendv1.QueryType, h queryHandler) { + handlerMutex.Lock() + defer handlerMutex.Unlock() + if _, ok := queryHandlers[t]; ok { + panic(fmt.Sprintf("%s: handler already registered", t)) + } + queryHandlers[t] = h +} + +func getQueryHandler(t querybackendv1.QueryType) (queryHandler, error) { + handlerMutex.RLock() + defer handlerMutex.RUnlock() + handler, ok := queryHandlers[t] + if !ok { + return nil, fmt.Errorf("unknown query type %s", t) + } + return handler, nil +} + +var ( + depMutex = new(sync.RWMutex) + queryDependencies = map[querybackendv1.QueryType][]block.Section{} +) + +func registerQueryDependencies(t querybackendv1.QueryType, deps ...block.Section) { + depMutex.Lock() + defer depMutex.Unlock() + if _, ok := queryDependencies[t]; ok { + panic(fmt.Sprintf("%s: dependencies already registered", t)) + } + queryDependencies[t] = deps +} + +func registerQueryType( + qt querybackendv1.QueryType, + rt querybackendv1.ReportType, + q queryHandler, + a aggregatorProvider, + deps ...block.Section, +) { + registerQueryReportType(qt, rt) + registerQueryHandler(qt, q) + registerQueryDependencies(qt, deps...) + registerAggregator(rt, a) +} + +type queryContext struct { + ctx context.Context + log log.Logger + meta *metastorev1.TenantService + req *request + obj *block.Object + svc *block.TenantService + err error +} + +func newQueryContext( + ctx context.Context, + logger log.Logger, + meta *metastorev1.TenantService, + req *request, + obj *block.Object, +) *queryContext { + return &queryContext{ + ctx: ctx, + log: logger, + req: req, + meta: meta, + obj: obj, + svc: block.NewTenantService(meta, obj), + } +} + +func executeQuery(q *queryContext, query *querybackendv1.Query) (r *querybackendv1.Report, err error) { + var span opentracing.Span + span, q.ctx = opentracing.StartSpanFromContext(q.ctx, "executeQuery."+strcase.ToCamel(query.QueryType.String())) + defer span.Finish() + handle, err := getQueryHandler(query.QueryType) + if err != nil { + return nil, err + } + if err = q.open(); err != nil { + return nil, fmt.Errorf("failed to initialize query context: %w", err) + } + defer func() { + _ = q.close(err) + }() + if r, err = handle(q, query); r != nil { + r.ReportType = QueryReportType(query.QueryType) + } + return r, err +} + +func (q *queryContext) open() error { + return q.svc.Open(q.ctx, q.sections()...) 
+} + +func (q *queryContext) close(err error) error { + return q.svc.CloseWithError(err) +} + +func (q *queryContext) sections() []block.Section { + sections := make(map[block.Section]struct{}, 3) + for _, qt := range q.req.src.Query { + for _, s := range queryDependencies[qt.QueryType] { + sections[s] = struct{}{} + } + } + unique := make([]block.Section, 0, len(sections)) + for s := range sections { + unique = append(unique, s) + } + return unique +} diff --git a/pkg/experiment/querybackend/query_label_names.go b/pkg/experiment/querybackend/query_label_names.go new file mode 100644 index 0000000000..744b16fc39 --- /dev/null +++ b/pkg/experiment/querybackend/query_label_names.go @@ -0,0 +1,100 @@ +package querybackend + +import ( + "sort" + "sync" + + "github.com/prometheus/prometheus/model/labels" + + querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" + "github.com/grafana/pyroscope/pkg/experiment/querybackend/block" + "github.com/grafana/pyroscope/pkg/model" + "github.com/grafana/pyroscope/pkg/phlaredb" +) + +func init() { + registerQueryType( + querybackendv1.QueryType_QUERY_LABEL_NAMES, + querybackendv1.ReportType_REPORT_LABEL_NAMES, + queryLabelNames, + newLabelNameAggregator, + []block.Section{block.SectionTSDB}..., + ) +} + +func queryLabelNames(q *queryContext, query *querybackendv1.Query) (*querybackendv1.Report, error) { + var names []string + var err error + if len(q.req.matchers) == 0 { + names, err = q.svc.Index().LabelNames() + } else { + names, err = labelNamesForMatchers(q.svc.Index(), q.req.matchers) + } + if err != nil { + return nil, err + } + resp := &querybackendv1.Report{ + LabelNames: &querybackendv1.LabelNamesReport{ + Query: query.LabelNames.CloneVT(), + LabelNames: names, + }, + } + return resp, nil +} + +func labelNamesForMatchers(reader phlaredb.IndexReader, matchers []*labels.Matcher) ([]string, error) { + postings, err := phlaredb.PostingsForMatchers(reader, nil, matchers...) 
+ if err != nil { + return nil, err + } + l := make(map[string]struct{}) + for postings.Next() { + var n []string + if n, err = reader.LabelNamesFor(postings.At()); err != nil { + return nil, err + } + for _, name := range n { + l[name] = struct{}{} + } + } + if err = postings.Err(); err != nil { + return nil, err + } + names := make([]string, len(l)) + var i int + for name := range l { + names[i] = name + i++ + } + sort.Strings(names) + return names, nil +} + +type labelNameAggregator struct { + init sync.Once + query *querybackendv1.LabelNamesQuery + names *model.LabelMerger +} + +func newLabelNameAggregator(*querybackendv1.InvokeRequest) aggregator { + return new(labelNameAggregator) +} + +func (m *labelNameAggregator) aggregate(report *querybackendv1.Report) error { + r := report.LabelNames + m.init.Do(func() { + m.query = r.Query.CloneVT() + m.names = model.NewLabelMerger() + }) + m.names.MergeLabelNames(r.LabelNames) + return nil +} + +func (m *labelNameAggregator) build() *querybackendv1.Report { + return &querybackendv1.Report{ + LabelNames: &querybackendv1.LabelNamesReport{ + Query: m.query, + LabelNames: m.names.LabelNames(), + }, + } +} diff --git a/pkg/experiment/querybackend/query_label_values.go b/pkg/experiment/querybackend/query_label_values.go new file mode 100644 index 0000000000..3ab79ae372 --- /dev/null +++ b/pkg/experiment/querybackend/query_label_values.go @@ -0,0 +1,103 @@ +package querybackend + +import ( + "errors" + "sort" + "sync" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" + + querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" + "github.com/grafana/pyroscope/pkg/experiment/querybackend/block" + "github.com/grafana/pyroscope/pkg/model" + "github.com/grafana/pyroscope/pkg/phlaredb" +) + +func init() { + registerQueryType( + querybackendv1.QueryType_QUERY_LABEL_VALUES, + querybackendv1.ReportType_REPORT_LABEL_VALUES, + queryLabelValues, + newLabelValueAggregator, + []block.Section{block.SectionTSDB}..., + ) +} + +func queryLabelValues(q *queryContext, query *querybackendv1.Query) (*querybackendv1.Report, error) { + var values []string + var err error + if len(q.req.matchers) == 0 { + values, err = q.svc.Index().LabelValues(query.LabelValues.LabelName) + } else { + values, err = labelValuesForMatchers(q.svc.Index(), query.LabelValues.LabelName, q.req.matchers) + } + if err != nil { + return nil, err + } + resp := &querybackendv1.Report{ + LabelValues: &querybackendv1.LabelValuesReport{ + Query: query.LabelValues.CloneVT(), + LabelValues: values, + }, + } + return resp, nil +} + +func labelValuesForMatchers(reader phlaredb.IndexReader, name string, matchers []*labels.Matcher) ([]string, error) { + postings, err := phlaredb.PostingsForMatchers(reader, nil, matchers...) 
+ if err != nil { + return nil, err + } + l := make(map[string]struct{}) + for postings.Next() { + var v string + if v, err = reader.LabelValueFor(postings.At(), name); err != nil { + if errors.Is(err, storage.ErrNotFound) { + continue + } + return nil, err + } + l[v] = struct{}{} + } + if err = postings.Err(); err != nil { + return nil, err + } + values := make([]string, len(l)) + var i int + for v := range l { + values[i] = v + i++ + } + sort.Strings(values) + return values, nil +} + +type labelValueAggregator struct { + init sync.Once + query *querybackendv1.LabelValuesQuery + values *model.LabelMerger +} + +func newLabelValueAggregator(*querybackendv1.InvokeRequest) aggregator { + return new(labelValueAggregator) +} + +func (m *labelValueAggregator) aggregate(report *querybackendv1.Report) error { + r := report.LabelValues + m.init.Do(func() { + m.query = r.Query.CloneVT() + m.values = model.NewLabelMerger() + }) + m.values.MergeLabelValues(r.LabelValues) + return nil +} + +func (m *labelValueAggregator) build() *querybackendv1.Report { + return &querybackendv1.Report{ + LabelValues: &querybackendv1.LabelValuesReport{ + Query: m.query, + LabelValues: m.values.LabelValues(), + }, + } +} diff --git a/pkg/experiment/querybackend/query_profile_entry.go b/pkg/experiment/querybackend/query_profile_entry.go new file mode 100644 index 0000000000..6f920f0554 --- /dev/null +++ b/pkg/experiment/querybackend/query_profile_entry.go @@ -0,0 +1,90 @@ +package querybackend + +import ( + "github.com/parquet-go/parquet-go" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + + "github.com/grafana/pyroscope/pkg/iter" + phlaremodel "github.com/grafana/pyroscope/pkg/model" + "github.com/grafana/pyroscope/pkg/phlaredb" + parquetquery "github.com/grafana/pyroscope/pkg/phlaredb/query" + schemav1 "github.com/grafana/pyroscope/pkg/phlaredb/schemas/v1" + "github.com/grafana/pyroscope/pkg/phlaredb/tsdb/index" +) + +func profileEntryIterator(q *queryContext, groupBy ...string) (iter.Iterator[ProfileEntry], error) { + series, err := getSeriesLabels(q.svc.Index(), q.req.matchers, groupBy...) 
+ if err != nil { + return nil, err + } + results := parquetquery.NewBinaryJoinIterator(0, + q.svc.Profiles().Column(q.ctx, "SeriesIndex", parquetquery.NewMapPredicate(series)), + q.svc.Profiles().Column(q.ctx, "TimeNanos", parquetquery.NewIntBetweenPredicate(q.req.startTime, q.req.endTime)), + ) + results = parquetquery.NewBinaryJoinIterator(0, results, + q.svc.Profiles().Column(q.ctx, "StacktracePartition", nil), + ) + + buf := make([][]parquet.Value, 3) + entries := iter.NewAsyncBatchIterator[*parquetquery.IteratorResult, ProfileEntry]( + results, 128, + func(r *parquetquery.IteratorResult) ProfileEntry { + buf = r.Columns(buf, + schemav1.SeriesIndexColumnName, + schemav1.TimeNanosColumnName, + schemav1.StacktracePartitionColumnName) + x := series[buf[0][0].Uint32()] + return ProfileEntry{ + RowNum: r.RowNumber[0], + Timestamp: model.TimeFromUnixNano(buf[1][0].Int64()), + Fingerprint: x.fingerprint, + Labels: x.labels, + Partition: buf[2][0].Uint64(), + } + }, + func([]ProfileEntry) {}, + ) + return entries, nil +} + +type ProfileEntry struct { + RowNum int64 + Timestamp model.Time + Fingerprint model.Fingerprint + Labels phlaremodel.Labels + Partition uint64 +} + +func (e ProfileEntry) RowNumber() int64 { return e.RowNum } + +type seriesLabels struct { + fingerprint model.Fingerprint + labels phlaremodel.Labels +} + +func getSeriesLabels(reader phlaredb.IndexReader, matchers []*labels.Matcher, by ...string) (map[uint32]seriesLabels, error) { + postings, err := getPostings(reader, matchers...) + if err != nil { + return nil, err + } + chunks := make([]index.ChunkMeta, 1) + series := make(map[uint32]seriesLabels) + l := make(phlaremodel.Labels, 0, 6) + for postings.Next() { + fp, err := reader.SeriesBy(postings.At(), &l, &chunks, by...) + if err != nil { + return nil, err + } + _, ok := series[chunks[0].SeriesIndex] + if ok { + continue + } + series[chunks[0].SeriesIndex] = seriesLabels{ + fingerprint: model.Fingerprint(fp), + labels: l.Clone(), + } + } + + return series, postings.Err() +} diff --git a/pkg/experiment/querybackend/query_series_labels.go b/pkg/experiment/querybackend/query_series_labels.go new file mode 100644 index 0000000000..1680f5ad9f --- /dev/null +++ b/pkg/experiment/querybackend/query_series_labels.go @@ -0,0 +1,94 @@ +package querybackend + +import ( + "sync" + + "github.com/prometheus/prometheus/model/labels" + + querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" + typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" + "github.com/grafana/pyroscope/pkg/experiment/querybackend/block" + "github.com/grafana/pyroscope/pkg/model" + "github.com/grafana/pyroscope/pkg/phlaredb" + "github.com/grafana/pyroscope/pkg/phlaredb/tsdb/index" +) + +func init() { + registerQueryType( + querybackendv1.QueryType_QUERY_SERIES_LABELS, + querybackendv1.ReportType_REPORT_SERIES_LABELS, + querySeriesLabels, + newSeriesLabelsAggregator, + []block.Section{block.SectionTSDB}..., + ) +} + +func querySeriesLabels(q *queryContext, query *querybackendv1.Query) (*querybackendv1.Report, error) { + postings, err := getPostings(q.svc.Index(), q.req.matchers...) + if err != nil { + return nil, err + } + var tmp model.Labels + var c []index.ChunkMeta + l := make(map[uint64]model.Labels) + for postings.Next() { + fp, _ := q.svc.Index().SeriesBy(postings.At(), &tmp, &c, query.SeriesLabels.LabelNames...) 
+ if _, ok := l[fp]; ok { + continue + } + l[fp] = tmp.Clone() + } + if err = postings.Err(); err != nil { + return nil, err + } + series := make([]*typesv1.Labels, len(l)) + var i int + for _, s := range l { + series[i] = &typesv1.Labels{Labels: s} + i++ + } + resp := &querybackendv1.Report{ + SeriesLabels: &querybackendv1.SeriesLabelsReport{ + Query: query.SeriesLabels.CloneVT(), + SeriesLabels: series, + }, + } + return resp, nil +} + +func getPostings(reader phlaredb.IndexReader, matchers ...*labels.Matcher) (index.Postings, error) { + if len(matchers) == 0 { + k, v := index.AllPostingsKey() + return reader.Postings(k, nil, v) + } + return phlaredb.PostingsForMatchers(reader, nil, matchers...) +} + +type seriesLabelsAggregator struct { + init sync.Once + query *querybackendv1.SeriesLabelsQuery + series *model.LabelMerger +} + +func newSeriesLabelsAggregator(*querybackendv1.InvokeRequest) aggregator { + return new(seriesLabelsAggregator) +} + +func (a *seriesLabelsAggregator) aggregate(report *querybackendv1.Report) error { + r := report.SeriesLabels + a.init.Do(func() { + a.query = r.Query.CloneVT() + a.series = model.NewLabelMerger() + }) + a.series.MergeSeries(r.SeriesLabels) + return nil +} + +func (a *seriesLabelsAggregator) build() *querybackendv1.Report { + return &querybackendv1.Report{ + SeriesLabels: &querybackendv1.SeriesLabelsReport{ + Query: a.query, + SeriesLabels: a.series.SeriesLabels(), + }, + } +} diff --git a/pkg/experiment/querybackend/query_time_series.go b/pkg/experiment/querybackend/query_time_series.go new file mode 100644 index 0000000000..568abfddd3 --- /dev/null +++ b/pkg/experiment/querybackend/query_time_series.go @@ -0,0 +1,114 @@ +package querybackend + +import ( + "strings" + "sync" + "time" + + "github.com/grafana/dskit/runutil" + + querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" + typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" + "github.com/grafana/pyroscope/pkg/experiment/querybackend/block" + phlaremodel "github.com/grafana/pyroscope/pkg/model" + parquetquery "github.com/grafana/pyroscope/pkg/phlaredb/query" + schemav1 "github.com/grafana/pyroscope/pkg/phlaredb/schemas/v1" +) + +func init() { + registerQueryType( + querybackendv1.QueryType_QUERY_TIME_SERIES, + querybackendv1.ReportType_REPORT_TIME_SERIES, + queryTimeSeries, + newTimeSeriesAggregator, + []block.Section{ + block.SectionTSDB, + block.SectionProfiles, + }..., + ) +} + +func queryTimeSeries(q *queryContext, query *querybackendv1.Query) (r *querybackendv1.Report, err error) { + entries, err := profileEntryIterator(q, query.TimeSeries.GroupBy...) + if err != nil { + return nil, err + } + defer runutil.CloseWithErrCapture(&err, entries, "failed to close profile entry iterator") + + column, err := schemav1.ResolveColumnByPath(q.svc.Profiles().Schema(), strings.Split("TotalValue", ".")) + if err != nil { + return nil, err + } + + rows := parquetquery.NewRepeatedRowIterator(q.ctx, entries, q.svc.Profiles().RowGroups(), column.ColumnIndex) + defer runutil.CloseWithErrCapture(&err, rows, "failed to close column iterator") + + builder := phlaremodel.NewTimeSeriesBuilder(query.TimeSeries.GroupBy...) 
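+    // Each row pairs a profile entry (fingerprint, labels, timestamp) with the
+    // value of the TotalValue column; feed them into the series builder one
+    // point at a time.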
+ for rows.Next() { + row := rows.At() + builder.Add( + row.Row.Fingerprint, + row.Row.Labels, + int64(row.Row.Timestamp), + float64(row.Values[0][0].Int64()), + ) + } + if err = rows.Err(); err != nil { + return nil, err + } + + resp := &querybackendv1.Report{ + TimeSeries: &querybackendv1.TimeSeriesReport{ + Query: query.TimeSeries.CloneVT(), + TimeSeries: builder.Build(), + }, + } + + return resp, nil +} + +type timeSeriesAggregator struct { + init sync.Once + startTime int64 + endTime int64 + query *querybackendv1.TimeSeriesQuery + series *phlaremodel.TimeSeriesMerger +} + +func newTimeSeriesAggregator(req *querybackendv1.InvokeRequest) aggregator { + return &timeSeriesAggregator{ + startTime: req.StartTime, + endTime: req.EndTime, + } +} + +func (a *timeSeriesAggregator) aggregate(report *querybackendv1.Report) error { + r := report.TimeSeries + a.init.Do(func() { + a.series = phlaremodel.NewTimeSeriesMerger(true) + a.query = r.Query.CloneVT() + }) + a.series.MergeTimeSeries(r.TimeSeries) + return nil +} + +func (a *timeSeriesAggregator) build() *querybackendv1.Report { + // TODO(kolesnikovae): Average aggregation should be implemented in + // the way that it can be distributed (count + sum), and should be done + // at "aggregate" call. + sum := typesv1.TimeSeriesAggregationType_TIME_SERIES_AGGREGATION_TYPE_SUM + stepMilli := time.Duration(a.query.GetStep() * float64(time.Second)).Milliseconds() + seriesIterator := phlaremodel.NewTimeSeriesMergeIterator(a.series.TimeSeries()) + return &querybackendv1.Report{ + TimeSeries: &querybackendv1.TimeSeriesReport{ + Query: a.query, + TimeSeries: phlaremodel.RangeSeries( + seriesIterator, + a.startTime, + a.endTime, + stepMilli, + &sum, + ), + }, + } +} diff --git a/pkg/experiment/querybackend/query_tree.go b/pkg/experiment/querybackend/query_tree.go new file mode 100644 index 0000000000..5e988d66c3 --- /dev/null +++ b/pkg/experiment/querybackend/query_tree.go @@ -0,0 +1,95 @@ +package querybackend + +import ( + "sync" + + "github.com/grafana/dskit/runutil" + + querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" + "github.com/grafana/pyroscope/pkg/experiment/querybackend/block" + "github.com/grafana/pyroscope/pkg/model" + parquetquery "github.com/grafana/pyroscope/pkg/phlaredb/query" + v1 "github.com/grafana/pyroscope/pkg/phlaredb/schemas/v1" + "github.com/grafana/pyroscope/pkg/phlaredb/symdb" +) + +func init() { + registerQueryType( + querybackendv1.QueryType_QUERY_TREE, + querybackendv1.ReportType_REPORT_TREE, + queryTree, + newTreeAggregator, + []block.Section{ + block.SectionTSDB, + block.SectionProfiles, + block.SectionSymbols, + }..., + ) +} + +func queryTree(q *queryContext, query *querybackendv1.Query) (*querybackendv1.Report, error) { + entries, err := profileEntryIterator(q) + if err != nil { + return nil, err + } + defer runutil.CloseWithErrCapture(&err, entries, "failed to close profile entry iterator") + + var columns v1.SampleColumns + if err = columns.Resolve(q.svc.Profiles().Schema()); err != nil { + return nil, err + } + + profiles := parquetquery.NewRepeatedRowIterator(q.ctx, entries, q.svc.Profiles().RowGroups(), + columns.StacktraceID.ColumnIndex, + columns.Value.ColumnIndex) + defer runutil.CloseWithErrCapture(&err, profiles, "failed to close profile stream") + + resolver := symdb.NewResolver(q.ctx, q.svc.Symbols()) + defer resolver.Release() + for profiles.Next() { + p := profiles.At() + resolver.AddSamplesFromParquetRow(p.Row.Partition, p.Values[0], p.Values[1]) + } + if err = profiles.Err(); 
err != nil { + return nil, err + } + + tree, err := resolver.Tree() + if err != nil { + return nil, err + } + + resp := &querybackendv1.Report{ + Tree: &querybackendv1.TreeReport{ + Query: query.Tree.CloneVT(), + Tree: tree.Bytes(query.Tree.GetMaxNodes()), + }, + } + return resp, nil +} + +type treeAggregator struct { + init sync.Once + query *querybackendv1.TreeQuery + tree *model.TreeMerger +} + +func newTreeAggregator(*querybackendv1.InvokeRequest) aggregator { return new(treeAggregator) } + +func (a *treeAggregator) aggregate(report *querybackendv1.Report) error { + r := report.Tree + a.init.Do(func() { + a.tree = model.NewTreeMerger() + a.query = r.Query.CloneVT() + }) + return a.tree.MergeTreeBytes(r.Tree) +} + +func (a *treeAggregator) build() *querybackendv1.Report { + return &querybackendv1.Report{ + Tree: &querybackendv1.TreeReport{ + Query: a.query, + Tree: a.tree.Tree().Bytes(a.query.GetMaxNodes()), + }, + } +} diff --git a/pkg/experiment/querybackend/queryplan/query_plan.go b/pkg/experiment/querybackend/queryplan/query_plan.go new file mode 100644 index 0000000000..bc33f3016e --- /dev/null +++ b/pkg/experiment/querybackend/queryplan/query_plan.go @@ -0,0 +1,349 @@ +package queryplan + +import ( + "fmt" + "io" + "math" + "slices" + "strings" + "unsafe" + + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" + "github.com/grafana/pyroscope/pkg/iter" +) + +// QueryPlan represents a physical query plan structured as a DAG. +// Each node in the graph can either be a "merge" or a "read" operation (leaves). +// Merge nodes reference other nodes, while read nodes reference data blocks. +type QueryPlan struct { + nodes []node + blocks []*metastorev1.BlockMeta +} + +type Node struct { + Type NodeType + + p *QueryPlan + n node +} + +type NodeType uint32 + +const ( + _ NodeType = iota + NodeRead + NodeMerge +) + +var typeNames = [...]string{"invalid", "read", "merge"} + +func (t NodeType) String() string { + if int(t) >= len(typeNames) { + return typeNames[0] + } + return typeNames[t] +} + +type node struct { + typ NodeType + // Node of merge type refers to nodes. + // Node of read type refers to blocks. + off uint32 + len uint32 +} + +func Open(p *querybackendv1.QueryPlan) *QueryPlan { + if len(p.Blocks) == 0 { + return new(QueryPlan) + } + qp := QueryPlan{blocks: p.Blocks} + if len(p.Graph) != 0 || len(p.Graph)%3 == 0 { + qp.nodes = unsafe.Slice((*node)(unsafe.Pointer(unsafe.SliceData(p.Graph))), len(p.Graph)/3) + } + return &qp +} + +// Build creates a query plan from the list of block metadata. +// +// NOTE(kolesnikovae): At this point it only groups blocks into uniform ranges, +// and builds a DAG of reads and merges. In practice, however, we may want to +// implement more sophisticated strategies. For example, it would be beneficial +// to group blocks based on the tenant services to ensure that a single read +// covers exactly one service, and does not have to deal with stack trace +// cardinality issues. Another example is grouping by shards to minimize the +// number of unique series (assuming the shards are still built based on the +// series labels) a reader or merger should handle. In general, the strategy +// should depend on the query type. 
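+//
+// A minimal usage sketch (mirroring the tests in query_plan_test.go):
+//
+//    p := Build(blocks, 2, 3) // blocks: []*metastorev1.BlockMeta
+//    root := p.Root()         // top-level merge node
+//    children := root.Children()
+//    for children.Next() {
+//        sub := children.At().Plan() // sub-plan scoped to the child node
+//        _ = sub.Proto()             // wire form, sent along with the query
+//    }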
+func Build( + blocks []*metastorev1.BlockMeta, + maxReads, maxMerges int64, +) *QueryPlan { + if len(blocks) == 0 { + return new(QueryPlan) + } + // First, we create leaves: the entire range of blocks + // is split into smaller uniform ranges, which will be + // fetched by workers. + s := int(math.Max(float64(maxReads), float64(maxMerges))) + ranges := uniformSplit(make([][2]uint32, s), len(blocks), maxReads) + nodes := make([]node, len(ranges)) + for i, b := range ranges { + nodes[i] = node{ + typ: NodeRead, + // Block range. + off: b[0], + len: b[1], + } + } + if len(nodes) < 2 { + return &QueryPlan{ + blocks: blocks, + nodes: nodes, + } + } + // Next we create merge nodes. + var off int + for { + // Split should not be applied to the same (sub-)range + // twice, therefore we keep track of the offset within + // the nodes slice. + length := len(nodes) + ranges = uniformSplit(ranges, len(nodes)-off, maxMerges) + for _, n := range ranges { + // Range offset does not account for the offset within + // the nodes slice, therefore we add it here. + nodes = append(nodes, node{ + typ: NodeMerge, + off: n[0] + uint32(off), + len: n[1], + }) + } + if len(ranges) == 1 { + // The root node has been added. + break + } else if len(ranges) == 0 { + // Create a virtual root, that will be a parent of all the + // top level nodes. We find the offset of child nodes based + // on the last node: its children is the last range of nodes + // that have a parent. + n := nodes[len(nodes)-1] + o := n.off + n.len + l := uint32(len(nodes)) - o + nodes = append(nodes, node{ + typ: NodeMerge, + off: o, + len: l, + }) + break + } + off += length + } + return &QueryPlan{ + blocks: blocks, + nodes: nodes, + } +} + +// Root returns the root node of the query plan. +func (p *QueryPlan) Root() *Node { + if len(p.nodes) == 0 { + return &Node{Type: NodeRead, p: p} + } + n := Node{p: p} + n.n = p.nodes[len(p.nodes)-1] + n.Type = n.n.typ + return &n +} + +// Plan returns the query plan scoped to the node. +// The plan references the parent plan blocks. +func (n *Node) Plan() *QueryPlan { + // BFS traversal. Our goal is to preserve the order of + // nodes as in the original plan, and only shift offsets. + nodes := make([]node, 0, 32) + stack := make([]node, 0, 32) + stack = append(stack, n.n) + var x node + for len(stack) > 0 { + x, stack = stack[0], stack[1:] + if x.typ == NodeMerge { + // Add child nodes in the reverse order, to honour + // the order of the original plan, compensating the + // stack LIFO at this level. We do it after append + // to stack to not modify the original plan. + s := len(stack) + stack = append(stack, n.p.nodes[x.off:x.off+x.len]...) + slices.Reverse(stack[s:]) + } + nodes = append(nodes, x) + } + if len(nodes) == 0 { + return new(QueryPlan) + } + // Swap merge and read nodes as their order is changed + // during the traversal. The order of nodes within the + // same type is revered as well (order within the level + // is fixed at traversal). + var p NodeType // Previous node type. + var s int + for i, c := range nodes { + if p != 0 && c.typ != p { + s = i + break + } + p = c.typ + } + slices.Reverse(nodes[:s]) // Merge nodes. + slices.Reverse(nodes[s:]) // Read nodes. + tmp := make([]node, s) + copy(tmp, nodes[:s]) + copy(nodes, nodes[s:]) + copy(nodes[len(nodes)-s:], tmp) + if nodes[0].typ != NodeRead { + panic("bug: first node must be a read node") + } + + // Offset correction by node type: as the plan is a subtree, + // the offsets of the child nodes are shifted. 
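+    // For example, a sub-plan whose read nodes cover blocks 6..11 of the
+    // parent plan keeps only blocks[6:12] and rebases the first read offset
+    // to zero.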
+ o := [3]int{0, -1, -1} // Table of offsets by type. + bs := nodes[0].off // Offset of the first referenced block. + for i, c := range nodes { + // Correct the offset. + if o[c.typ] < 0 { + // This is the first node of the type: we remember + // the offset and reset it to zero, as children of + // the first node are _always_ placed at the very + // beginning (for both read and merge nodes). + nodes[i].off = 0 + o[c.typ] = int(c.len) + } else { + nodes[i].off = uint32(o[c.typ]) + o[c.typ] += int(c.len) + } + } + + // Update references to the blocks. + be := bs + uint32(o[NodeRead]) + blocks := n.p.blocks[bs:be] + + return &QueryPlan{ + nodes: nodes, + blocks: blocks, + } +} + +func (p *QueryPlan) Proto() *querybackendv1.QueryPlan { + return &querybackendv1.QueryPlan{ + Graph: unsafe.Slice((*uint32)(unsafe.Pointer(unsafe.SliceData(p.nodes))), len(p.nodes)*3), + Blocks: p.blocks, + } +} + +func (p *QueryPlan) String() string { + var b strings.Builder + printPlan(&b, "", p, false) + return b.String() +} + +func (n *Node) Children() iter.Iterator[*Node] { + if n.n.typ != NodeMerge { + return iter.NewEmptyIterator[*Node]() + } + return &nodeIterator{n: n} +} + +func (n *Node) Blocks() iter.Iterator[*metastorev1.BlockMeta] { + if n.n.typ != NodeRead { + return iter.NewEmptyIterator[*metastorev1.BlockMeta]() + } + return &blockIterator{n: n} +} + +type nodeIterator struct { + n *Node + i int +} + +func (i *nodeIterator) Err() error { return nil } +func (i *nodeIterator) Close() error { return nil } + +func (i *nodeIterator) Next() bool { + if i.i >= int(i.n.n.len) { + return false + } + i.i++ + return true +} + +func (i *nodeIterator) At() *Node { + n := i.n.p.nodes[int(i.n.n.off)+i.i-1] + return &Node{ + Type: i.n.Type, + p: i.n.p, + n: n, + } +} + +type blockIterator struct { + n *Node + i int +} + +func (i *blockIterator) Err() error { return nil } +func (i *blockIterator) Close() error { return nil } + +func (i *blockIterator) Next() bool { + if i.i >= int(i.n.n.len) { + return false + } + i.i++ + return true +} + +func (i *blockIterator) At() *metastorev1.BlockMeta { + return i.n.p.blocks[int(i.n.n.off)+i.i-1] +} + +// uniformSplit splits a slice of length s into +// uniform ranges not exceeding the size max. +func uniformSplit(ret [][2]uint32, s int, max int64) [][2]uint32 { + ret = ret[:0] + n := math.Ceil(float64(s) / float64(max)) // Find number of parts. + o := int(math.Ceil(float64(s) / n)) // Find optimal part size. 
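+    // For example, s=25 and max=2 give n=13 parts of size o=2:
+    // twelve ranges of length 2 followed by one range of length 1.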
+ for i := 0; i < s; i += o { + r := i + o + if r > s { + r = s + } + ret = append(ret, [2]uint32{uint32(i), uint32(r - i)}) + } + return ret +} + +func printPlan(w io.Writer, pad string, p *QueryPlan, debug bool) { + r := p.Root() + if debug { + _, _ = fmt.Fprintf(w, pad+"%s {children: %d, nodes: %d, blocks: %d}\n", + r.Type, r.n.len, len(r.p.nodes), len(r.p.blocks)) + } else { + _, _ = fmt.Fprintf(w, pad+"%s (%d)\n", r.Type, r.n.len) + } + + switch r.Type { + case NodeMerge: + c := r.Children() + for c.Next() { + printPlan(w, pad+"\t", c.At().Plan(), debug) + } + + case NodeRead: + b := r.Blocks() + for b.Next() { + _, _ = fmt.Fprintf(w, pad+"\t"+"%+v\n", b.At()) + } + + default: + panic("unknown type") + } +} diff --git a/pkg/experiment/querybackend/queryplan/query_plan_test.go b/pkg/experiment/querybackend/queryplan/query_plan_test.go new file mode 100644 index 0000000000..488e2f7ce6 --- /dev/null +++ b/pkg/experiment/querybackend/queryplan/query_plan_test.go @@ -0,0 +1,128 @@ +package queryplan + +import ( + "bytes" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" + "github.com/grafana/pyroscope/pkg/iter" +) + +func Test_Plan(t *testing.T) { + blocks := []*metastorev1.BlockMeta{ + {Id: "1"}, {Id: "2"}, + {Id: "3"}, {Id: "4"}, + {Id: "5"}, {Id: "6"}, + {Id: "7"}, {Id: "8"}, + {Id: "9"}, {Id: "10"}, + {Id: "11"}, {Id: "12"}, + {Id: "13"}, {Id: "14"}, + {Id: "15"}, {Id: "16"}, + {Id: "17"}, {Id: "18"}, + {Id: "19"}, {Id: "20"}, + {Id: "21"}, {Id: "22"}, + {Id: "23"}, {Id: "24"}, + {Id: "25"}, + } + + p := Build(blocks, 2, 3) + var buf bytes.Buffer + printPlan(&buf, "", p, true) + // Ensure that the plan has not been modified + // during traversal performed by printPlan. + assert.Equal(t, Build(blocks, 2, 3), p) + + expected, err := os.ReadFile("testdata/plan.txt") + require.NoError(t, err) + assert.Equal(t, string(expected), buf.String()) + + // Root node (sub-)plan must be identical to the original plan. 
+ buf.Reset() + printPlan(&buf, "", p.Root().Plan(), true) + assert.Equal(t, string(expected), buf.String()) +} + +func Test_Plan_propagation(t *testing.T) { + blocks := []*metastorev1.BlockMeta{ + {Id: "01J2JY1K5J4T2WNDV05CHVFCA9"}, + {Id: "01J2JY21VVYYV4PMDGK4TVMZ6H"}, + {Id: "01J2JY2GF83EF0QMW94T19MXHN"}, + {Id: "01J2JY45S90MWF6ZER08BFGPGP"}, + {Id: "01J2JY5JR0C9V64EP32RPH61E7"}, + {Id: "01J2JY61BG7QBPNK54EY8N0T6K"}, + {Id: "01J2JZ0A7MPZMR0R745HAZD1S9"}, + {Id: "01J2JZ0RY9WCA01S322EG201R8"}, + } + + var buf bytes.Buffer + expected := `merge +read [id:"01J2JY1K5J4T2WNDV05CHVFCA9" id:"01J2JY21VVYYV4PMDGK4TVMZ6H"] +read [id:"01J2JY2GF83EF0QMW94T19MXHN" id:"01J2JY45S90MWF6ZER08BFGPGP"] +read [id:"01J2JY5JR0C9V64EP32RPH61E7" id:"01J2JY61BG7QBPNK54EY8N0T6K"] +read [id:"01J2JZ0A7MPZMR0R745HAZD1S9" id:"01J2JZ0RY9WCA01S322EG201R8"] +` + + p := Build(blocks, 2, 5).Proto() + n := []*querybackendv1.QueryPlan{p} + var x *QueryPlan + for len(n) > 0 { + x, n = Open(n[0]), n[1:] + + switch r := x.Root(); r.Type { + case NodeMerge: + _, _ = fmt.Fprintln(&buf, "merge") + c := r.Children() + for c.Next() { + n = append(n, c.At().Plan().Proto()) + } + + case NodeRead: + _, _ = fmt.Fprintln(&buf, "read", iter.MustSlice(r.Blocks())) + + default: + panic("query plan: unknown node type") + } + } + + require.Equal(t, expected, buf.String()) +} + +func Test_Plan_skip_top_merge(t *testing.T) { + blocks := []*metastorev1.BlockMeta{ + {Id: "01J2JY1K5J4T2WNDV05CHVFCA9"}, + {Id: "01J2JY21VVYYV4PMDGK4TVMZ6H"}, + } + + var buf bytes.Buffer + expected := `[id:"01J2JY1K5J4T2WNDV05CHVFCA9" id:"01J2JY21VVYYV4PMDGK4TVMZ6H"]` + + p := Build(blocks, 2, 5).Proto() + n := []*querybackendv1.QueryPlan{p} + var x *QueryPlan + for len(n) > 0 { + x, n = Open(n[0]), n[1:] + + switch r := x.Root(); r.Type { + case NodeMerge: + _, _ = fmt.Fprintln(&buf, "merge") + c := r.Children() + for c.Next() { + n = append(n, c.At().Plan().Proto()) + } + + case NodeRead: + _, _ = fmt.Fprint(&buf, iter.MustSlice(r.Blocks())) + + default: + panic("query plan: unknown node type") + } + } + + require.Equal(t, expected, buf.String()) +} diff --git a/pkg/experiment/querybackend/queryplan/testdata/plan.txt b/pkg/experiment/querybackend/queryplan/testdata/plan.txt new file mode 100644 index 0000000000..84b82bf71e --- /dev/null +++ b/pkg/experiment/querybackend/queryplan/testdata/plan.txt @@ -0,0 +1,46 @@ +merge {children: 2, nodes: 21, blocks: 25} + merge {children: 3, nodes: 13, blocks: 18} + merge {children: 3, nodes: 4, blocks: 6} + read {children: 2, nodes: 1, blocks: 2} + id:"1" + id:"2" + read {children: 2, nodes: 1, blocks: 2} + id:"3" + id:"4" + read {children: 2, nodes: 1, blocks: 2} + id:"5" + id:"6" + merge {children: 3, nodes: 4, blocks: 6} + read {children: 2, nodes: 1, blocks: 2} + id:"7" + id:"8" + read {children: 2, nodes: 1, blocks: 2} + id:"9" + id:"10" + read {children: 2, nodes: 1, blocks: 2} + id:"11" + id:"12" + merge {children: 3, nodes: 4, blocks: 6} + read {children: 2, nodes: 1, blocks: 2} + id:"13" + id:"14" + read {children: 2, nodes: 1, blocks: 2} + id:"15" + id:"16" + read {children: 2, nodes: 1, blocks: 2} + id:"17" + id:"18" + merge {children: 2, nodes: 7, blocks: 7} + merge {children: 3, nodes: 4, blocks: 6} + read {children: 2, nodes: 1, blocks: 2} + id:"19" + id:"20" + read {children: 2, nodes: 1, blocks: 2} + id:"21" + id:"22" + read {children: 2, nodes: 1, blocks: 2} + id:"23" + id:"24" + merge {children: 1, nodes: 2, blocks: 1} + read {children: 1, nodes: 1, blocks: 1} + id:"25" diff --git 
a/pkg/experiment/querybackend/report_aggregator.go b/pkg/experiment/querybackend/report_aggregator.go new file mode 100644 index 0000000000..44be8d83f9 --- /dev/null +++ b/pkg/experiment/querybackend/report_aggregator.go @@ -0,0 +1,155 @@ +package querybackend + +import ( + "fmt" + "sync" + + querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" +) + +var ( + aggregatorMutex = new(sync.RWMutex) + aggregators = map[querybackendv1.ReportType]aggregatorProvider{} + queryReportType = map[querybackendv1.QueryType]querybackendv1.ReportType{} +) + +type aggregatorProvider func(*querybackendv1.InvokeRequest) aggregator + +type aggregator interface { + // The method is called concurrently. + aggregate(*querybackendv1.Report) error + // build the aggregation result. It's guaranteed that aggregate() + // was called at least once before report() is called. + build() *querybackendv1.Report +} + +func registerAggregator(t querybackendv1.ReportType, ap aggregatorProvider) { + aggregatorMutex.Lock() + defer aggregatorMutex.Unlock() + _, ok := aggregators[t] + if ok { + panic(fmt.Sprintf("%s: aggregator already registered", t)) + } + aggregators[t] = ap +} + +func getAggregator(r *querybackendv1.InvokeRequest, x *querybackendv1.Report) (aggregator, error) { + aggregatorMutex.RLock() + defer aggregatorMutex.RUnlock() + a, ok := aggregators[x.ReportType] + if !ok { + return nil, fmt.Errorf("unknown build type %s", x.ReportType) + } + return a(r), nil +} + +func registerQueryReportType(q querybackendv1.QueryType, r querybackendv1.ReportType) { + aggregatorMutex.Lock() + defer aggregatorMutex.Unlock() + v, ok := queryReportType[q] + if ok { + panic(fmt.Sprintf("%s: handler already registered (%s)", q, v)) + } + queryReportType[q] = r +} + +func QueryReportType(q querybackendv1.QueryType) querybackendv1.ReportType { + aggregatorMutex.RLock() + defer aggregatorMutex.RUnlock() + r, ok := queryReportType[q] + if !ok { + panic(fmt.Sprintf("unknown build type %s", q)) + } + return r +} + +type reportAggregator struct { + request *querybackendv1.InvokeRequest + sm sync.Mutex + staged map[querybackendv1.ReportType]*querybackendv1.Report + aggregators map[querybackendv1.ReportType]aggregator +} + +func newAggregator(request *querybackendv1.InvokeRequest) *reportAggregator { + return &reportAggregator{ + request: request, + staged: make(map[querybackendv1.ReportType]*querybackendv1.Report), + aggregators: make(map[querybackendv1.ReportType]aggregator), + } +} + +func (ra *reportAggregator) aggregateResponse(resp *querybackendv1.InvokeResponse, err error) error { + if err != nil { + return err + } + for _, r := range resp.Reports { + if err = ra.aggregateReport(r); err != nil { + return err + } + } + return nil +} + +func (ra *reportAggregator) aggregateReport(r *querybackendv1.Report) (err error) { + if r == nil { + return nil + } + ra.sm.Lock() + v, found := ra.staged[r.ReportType] + if !found { + // We delay aggregation until we have at least two + // reports of the same type. Otherwise, we just store + // the report and will return it as is, if it is the + // only one. + ra.staged[r.ReportType] = r + ra.sm.Unlock() + return nil + } + // Found a staged report of the same type. + if v != nil { + // It should be aggregated and removed from the table. 
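+        // A nil entry marks that reports of this type have already been handed
+        // to an aggregator, so aggregateStaged will skip it.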
+ err = ra.aggregateReportNoCheck(v) + ra.staged[r.ReportType] = nil + } + ra.sm.Unlock() + if err != nil { + return err + } + return ra.aggregateReportNoCheck(r) +} + +func (ra *reportAggregator) aggregateReportNoCheck(report *querybackendv1.Report) (err error) { + a, ok := ra.aggregators[report.ReportType] + if !ok { + a, err = getAggregator(ra.request, report) + if err != nil { + return err + } + ra.aggregators[report.ReportType] = a + } + return a.aggregate(report) +} + +func (ra *reportAggregator) aggregateStaged() error { + for _, r := range ra.staged { + if r != nil { + if err := ra.aggregateReportNoCheck(r); err != nil { + return err + } + } + } + return nil +} + +func (ra *reportAggregator) response() (*querybackendv1.InvokeResponse, error) { + if err := ra.aggregateStaged(); err != nil { + return nil, err + } + reports := make([]*querybackendv1.Report, 0, len(ra.staged)) + for t, a := range ra.aggregators { + r := a.build() + r.ReportType = t + reports = append(reports, r) + } + return &querybackendv1.InvokeResponse{Reports: reports}, nil +} diff --git a/pkg/experiment/queryfrontend/frontend_meta.go b/pkg/experiment/queryfrontend/frontend_meta.go new file mode 100644 index 0000000000..839b36efb7 --- /dev/null +++ b/pkg/experiment/queryfrontend/frontend_meta.go @@ -0,0 +1,188 @@ +package queryfrontend + +import ( + "context" + "fmt" + "math/rand" + "slices" + "strings" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql/parser" + + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" + metastoreclient "github.com/grafana/pyroscope/pkg/experiment/metastore/client" + "github.com/grafana/pyroscope/pkg/experiment/querybackend" + querybackendclient "github.com/grafana/pyroscope/pkg/experiment/querybackend/client" + "github.com/grafana/pyroscope/pkg/experiment/querybackend/queryplan" + phlaremodel "github.com/grafana/pyroscope/pkg/model" +) + +func ListMetadata( + ctx context.Context, + client *metastoreclient.Client, + logger log.Logger, + tenants []string, + startTime, endTime int64, + query string, +) ([]*metastorev1.BlockMeta, error) { + _ = level.Info(logger).Log("msg", "listing metadata", + "tenants", strings.Join(tenants, ","), + "start", startTime, + "end", endTime, + "query", query, + ) + resp, err := client.ListBlocksForQuery(ctx, &metastorev1.ListBlocksForQueryRequest{ + TenantId: tenants, + StartTime: startTime, + EndTime: endTime, + Query: query, + }) + if err != nil { + // TODO: Not sure if we want to pass it through + return nil, err + } + // TODO: Metrics + printStats(logger, resp.Blocks) + return resp.Blocks, nil +} + +func printStats(logger log.Logger, blocks []*metastorev1.BlockMeta) { + type blockMetaStats struct { + level uint32 + minTime int64 + maxTime int64 + size uint64 + count int + } + m := make(map[uint32]*blockMetaStats) + for _, b := range blocks { + s, ok := m[b.CompactionLevel] + if !ok { + s = &blockMetaStats{level: b.CompactionLevel} + m[b.CompactionLevel] = s + } + for _, x := range b.TenantServices { + s.size += x.Size + } + s.count++ + } + sorted := make([]*blockMetaStats, 0, len(m)) + for _, s := range m { + sorted = append(sorted, s) + } + slices.SortFunc(sorted, func(a, b *blockMetaStats) int { + return int(a.level - b.level) + }) + fields := make([]interface{}, 0, 4+len(sorted)*2) + fields = append(fields, "msg", "block metadata list", 
"blocks_total", fmt.Sprint(len(blocks))) + for _, s := range sorted { + fields = append(fields, + fmt.Sprintf("l%d_blocks", s.level), fmt.Sprint(s.count), + fmt.Sprintf("l%d_size", s.level), fmt.Sprint(s.size), + ) + } + _ = level.Info(logger).Log(fields...) +} + +var xrand = rand.New(rand.NewSource(4349676827832284783)) + +func Query( + ctx context.Context, + startTime, endTime int64, + tenants []string, + labelSelector string, + q *querybackendv1.Query, + mc *metastoreclient.Client, + qc *querybackendclient.Client, + logger log.Logger, +) (*querybackendv1.Report, error) { + blocks, err := ListMetadata(ctx, mc, logger, tenants, startTime, endTime, labelSelector) + if err != nil { + return nil, err + } + if len(blocks) == 0 { + return nil, nil + } + // Randomize the order of blocks to avoid hotspots. + xrand.Shuffle(len(blocks), func(i, j int) { + blocks[i], blocks[j] = blocks[j], blocks[i] + }) + // TODO: Params. + p := queryplan.Build(blocks, 2, 10) + resp, err := qc.Invoke(ctx, &querybackendv1.InvokeRequest{ + Tenant: tenants, + StartTime: startTime, + EndTime: endTime, + LabelSelector: labelSelector, + Options: &querybackendv1.InvokeOptions{}, + QueryPlan: p.Proto(), + Query: []*querybackendv1.Query{q}, + }) + if err != nil { + return nil, err + } + return findReport(querybackend.QueryReportType(q.QueryType), resp.Reports), nil +} + +func BuildLabelSelectorFromMatchers(matchers []string) (string, error) { + parsed, err := parseMatchers(matchers) + if err != nil { + return "", fmt.Errorf("parsing label selector: %w", err) + } + return matchersToLabelSelector(parsed), nil +} + +func BuildLabelSelectorWithProfileType(labelSelector, profileTypeID string) (string, error) { + matchers, err := parser.ParseMetricSelector(labelSelector) + if err != nil { + return "", fmt.Errorf("parsing label selector %q: %w", labelSelector, err) + } + profileType, err := phlaremodel.ParseProfileTypeSelector(profileTypeID) + if err != nil { + return "", fmt.Errorf("parsing profile type ID %q: %w", profileTypeID, err) + } + matchers = append(matchers, phlaremodel.SelectorFromProfileType(profileType)) + return matchersToLabelSelector(matchers), nil +} + +func parseMatchers(matchers []string) ([]*labels.Matcher, error) { + parsed := make([]*labels.Matcher, 0, len(matchers)) + for _, m := range matchers { + s, err := parser.ParseMetricSelector(m) + if err != nil { + return nil, fmt.Errorf("failed to parse label selector %q: %w", s, err) + } + parsed = append(parsed, s...) 
+ } + return parsed, nil +} + +func matchersToLabelSelector(matchers []*labels.Matcher) string { + var q strings.Builder + q.WriteByte('{') + for i, m := range matchers { + if i > 0 { + q.WriteByte(',') + } + q.WriteString(m.Name) + q.WriteString(m.Type.String()) + q.WriteByte('"') + q.WriteString(m.Value) + q.WriteByte('"') + } + q.WriteByte('}') + return q.String() +} + +func findReport(r querybackendv1.ReportType, reports []*querybackendv1.Report) *querybackendv1.Report { + for _, x := range reports { + if x.ReportType == r { + return x + } + } + return nil +} diff --git a/pkg/experiment/queryfrontend/frontend_profile_types.go b/pkg/experiment/queryfrontend/frontend_profile_types.go new file mode 100644 index 0000000000..32b41a1c2d --- /dev/null +++ b/pkg/experiment/queryfrontend/frontend_profile_types.go @@ -0,0 +1,155 @@ +package queryfrontend + +import ( + "context" + "slices" + "sort" + + "connectrpc.com/connect" + "github.com/go-kit/log" + + querierv1 "github.com/grafana/pyroscope/api/gen/proto/go/querier/v1" + typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" + metastoreclient "github.com/grafana/pyroscope/pkg/experiment/metastore/client" + phlaremodel "github.com/grafana/pyroscope/pkg/model" +) + +var profileTypeLabels2 = []string{ + "__profile_type__", + "service_name", +} + +var profileTypeLabels5 = []string{ + "__name__", + "__profile_type__", + "__type__", + "pyroscope_app", + "service_name", +} + +func IsProfileTypeQuery(labels, matchers []string) bool { + if len(matchers) > 0 { + return false + } + var s []string + switch len(labels) { + case 2: + s = profileTypeLabels2 + case 5: + s = profileTypeLabels5 + default: + return false + } + sort.Strings(labels) + return slices.Compare(s, labels) == 0 +} + +func ListProfileTypesFromMetadataAsSeriesLabels( + ctx context.Context, + client *metastoreclient.Client, + logger log.Logger, + tenants []string, + startTime, endTime int64, + labels []string, + +) (*connect.Response[querierv1.SeriesResponse], error) { + resp, err := listProfileTypesFromMetadata(ctx, client, logger, tenants, startTime, endTime) + if err != nil { + return nil, err + } + return connect.NewResponse(&querierv1.SeriesResponse{ + LabelsSet: resp.buildSeriesLabels(labels), + }), nil +} + +func listProfileTypesFromMetadata( + ctx context.Context, + client *metastoreclient.Client, + logger log.Logger, + tenants []string, + startTime, endTime int64, +) (*ptypes, error) { + metas, err := ListMetadata(ctx, client, logger, tenants, startTime, endTime, "{}") + if err != nil { + return nil, err + } + p := newProfileTypesResponseBuilder(len(metas) * 8) + for _, m := range metas { + for _, s := range m.TenantServices { + p.addServiceProfileTypes(s.Name, s.ProfileTypes...) 
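+ // addServiceProfileTypes stores types in a per-service set, so duplicates
+ // across blocks and tenant services collapse automatically.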
+ } + } + return p, nil +} + +type ptypes struct { + services map[string]map[string]struct{} +} + +func newProfileTypesResponseBuilder(size int) *ptypes { + return &ptypes{ + services: make(map[string]map[string]struct{}, size), + } +} + +func (p *ptypes) addServiceProfileTypes(s string, types ...string) { + sp, ok := p.services[s] + if !ok { + sp = make(map[string]struct{}, len(types)) + p.services[s] = sp + } + for _, t := range types { + sp[t] = struct{}{} + } +} + +func (p *ptypes) buildSeriesLabels(names []string) (labels []*typesv1.Labels) { + switch len(names) { + case 2: + labels = p.buildSeriesLabels2() + case 5: + labels = p.buildSeriesLabels5() + default: + panic("bug: invalid request: expected 2 or 5 label names") + } + slices.SortFunc(labels, func(a, b *typesv1.Labels) int { + return phlaremodel.CompareLabelPairs(a.Labels, b.Labels) + }) + return labels +} + +func (p *ptypes) buildSeriesLabels2() []*typesv1.Labels { + labels := make([]*typesv1.Labels, 0, len(p.services)*4) + for n, types := range p.services { + for t := range types { + labels = append(labels, &typesv1.Labels{ + Labels: []*typesv1.LabelPair{ + {Name: "__profile_type__", Value: t}, + {Name: "service_name", Value: n}, + }, + }) + } + } + return labels +} + +func (p *ptypes) buildSeriesLabels5() []*typesv1.Labels { + labels := make([]*typesv1.Labels, 0, len(p.services)*4) + for n, types := range p.services { + for t := range types { + pt, err := phlaremodel.ParseProfileTypeSelector(t) + if err != nil { + panic(err) + } + labels = append(labels, &typesv1.Labels{ + Labels: []*typesv1.LabelPair{ + {Name: "__profile_type__", Value: t}, + {Name: "service_name", Value: n}, + {Name: "__name__", Value: pt.Name}, + {Name: "__type__", Value: pt.SampleType}, + }, + }) + } + } + return labels +} diff --git a/pkg/frontend/frontend_select_series.go b/pkg/frontend/frontend_select_time_series.go similarity index 96% rename from pkg/frontend/frontend_select_series.go rename to pkg/frontend/frontend_select_time_series.go index a7cf37ac78..0c1b4ea9b3 100644 --- a/pkg/frontend/frontend_select_series.go +++ b/pkg/frontend/frontend_select_time_series.go @@ -51,7 +51,7 @@ func (f *Frontend) SelectSeries(ctx context.Context, g.SetLimit(maxConcurrent) } - m := phlaremodel.NewSeriesMerger(false) + m := phlaremodel.NewTimeSeriesMerger(false) interval := validationutil.MaxDurationOrZeroPerTenant(tenantIDs, f.limits.QuerySplitDuration) intervals := NewTimeIntervalIterator(time.UnixMilli(c.Msg.Start), time.UnixMilli(c.Msg.End), interval, WithAlignment(time.Second*time.Duration(c.Msg.Step))) @@ -75,7 +75,7 @@ func (f *Frontend) SelectSeries(ctx context.Context, if err != nil { return err } - m.MergeSeries(resp.Msg.Series) + m.MergeTimeSeries(resp.Msg.Series) return nil }) } @@ -84,5 +84,5 @@ func (f *Frontend) SelectSeries(ctx context.Context, return nil, err } - return connect.NewResponse(&querierv1.SelectSeriesResponse{Series: m.Series()}), nil + return connect.NewResponse(&querierv1.SelectSeriesResponse{Series: m.TimeSeries()}), nil } diff --git a/pkg/frontend/frontend_series.go b/pkg/frontend/frontend_series_labels.go similarity index 100% rename from pkg/frontend/frontend_series.go rename to pkg/frontend/frontend_series_labels.go diff --git a/pkg/iter/iter.go b/pkg/iter/iter.go index 12a183426f..1460289246 100644 --- a/pkg/iter/iter.go +++ b/pkg/iter/iter.go @@ -163,6 +163,14 @@ func Slice[T any](it Iterator[T]) ([]T, error) { return result, it.Err() } +func MustSlice[T any](it Iterator[T]) []T { + s, err := Slice(it) + if err != nil { + 
panic(err) + } + return s +} + // CloneN returns N copy of the iterator. // The returned iterators are independent of the original iterator. // The original might be exhausted and should be discarded. diff --git a/pkg/model/labels_merger.go b/pkg/model/labels_merger.go new file mode 100644 index 0000000000..ebfb66cfa6 --- /dev/null +++ b/pkg/model/labels_merger.go @@ -0,0 +1,101 @@ +package model + +import ( + "slices" + "sort" + "sync" + + typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" +) + +type LabelMerger struct { + mu sync.Mutex + names map[string]struct{} + values map[string]struct{} + series map[uint64]*typesv1.Labels +} + +func NewLabelMerger() *LabelMerger { + return &LabelMerger{ + names: make(map[string]struct{}), + values: make(map[string]struct{}), + series: make(map[uint64]*typesv1.Labels), + } +} + +func (m *LabelMerger) MergeLabelNames(names []string) { + m.mu.Lock() + defer m.mu.Unlock() + for _, n := range names { + m.names[n] = struct{}{} + } +} + +func (m *LabelMerger) MergeLabelValues(values []string) { + m.mu.Lock() + defer m.mu.Unlock() + for _, v := range values { + m.values[v] = struct{}{} + } +} + +func (m *LabelMerger) HasNames() bool { + return len(m.names) > 0 +} + +func (m *LabelMerger) LabelNames() []string { + m.mu.Lock() + defer m.mu.Unlock() + s := make([]string, len(m.names)) + var i int + for n := range m.names { + s[i] = n + i++ + } + sort.Strings(s) + return s +} + +func (m *LabelMerger) HasValues() bool { + return len(m.values) > 0 +} + +func (m *LabelMerger) LabelValues() []string { + m.mu.Lock() + defer m.mu.Unlock() + s := make([]string, len(m.values)) + var i int + for v := range m.values { + s[i] = v + i++ + } + sort.Strings(s) + return s +} + +func (m *LabelMerger) MergeSeries(series []*typesv1.Labels) { + m.mu.Lock() + defer m.mu.Unlock() + for _, s := range series { + m.series[Labels(s.Labels).Hash()] = s + } +} + +func (m *LabelMerger) SeriesLabels() []*typesv1.Labels { + m.mu.Lock() + defer m.mu.Unlock() + s := make([]*typesv1.Labels, len(m.series)) + var i int + for _, v := range m.series { + s[i] = v + i++ + } + slices.SortFunc(s, func(a, b *typesv1.Labels) int { + return CompareLabelPairs(a.Labels, b.Labels) + }) + return s +} + +func (m *LabelMerger) HasSeries() bool { + return len(m.series) > 0 +} diff --git a/pkg/iter/profiles.go b/pkg/model/profiles.go similarity index 77% rename from pkg/iter/profiles.go rename to pkg/model/profiles.go index b4d00ea01b..5ea1fe6b65 100644 --- a/pkg/iter/profiles.go +++ b/pkg/model/profiles.go @@ -1,10 +1,10 @@ -package iter +package model import ( "github.com/grafana/dskit/multierror" "github.com/prometheus/common/model" - phlaremodel "github.com/grafana/pyroscope/pkg/model" + "github.com/grafana/pyroscope/pkg/iter" "github.com/grafana/pyroscope/pkg/util/loser" ) @@ -13,20 +13,20 @@ type Timestamp interface { } type Profile interface { - Labels() phlaremodel.Labels + Labels() Labels Timestamp } func lessProfile(p1, p2 Profile) bool { if p1.Timestamp() == p2.Timestamp() { // todo we could compare SeriesRef here - return phlaremodel.CompareLabelPairs(p1.Labels(), p2.Labels()) < 0 + return CompareLabelPairs(p1.Labels(), p2.Labels()) < 0 } return p1.Timestamp() < p2.Timestamp() } type MergeIterator[P Profile] struct { - tree *loser.Tree[P, Iterator[P]] + tree *loser.Tree[P, iter.Iterator[P]] closeErrs multierror.MultiError current P deduplicate bool @@ -35,34 +35,34 @@ type MergeIterator[P Profile] struct { // NewMergeIterator returns an iterator that k-way merges the given iterators. 
// The given iterators must be sorted by timestamp and labels themselves. // Optionally, the iterator can deduplicate profiles with the same timestamp and labels. -func NewMergeIterator[P Profile](max P, deduplicate bool, iters ...Iterator[P]) Iterator[P] { +func NewMergeIterator[P Profile](max P, deduplicate bool, iters ...iter.Iterator[P]) iter.Iterator[P] { if len(iters) == 0 { - return NewEmptyIterator[P]() + return iter.NewEmptyIterator[P]() } if len(iters) == 1 { // No need to merge a single iterator. // We should never allow a single iterator to be passed in because return iters[0] } - iter := &MergeIterator[P]{ + m := &MergeIterator[P]{ deduplicate: deduplicate, current: max, } - iter.tree = loser.New( + m.tree = loser.New( iters, max, - func(s Iterator[P]) P { + func(s iter.Iterator[P]) P { return s.At() }, func(p1, p2 P) bool { return lessProfile(p1, p2) }, - func(s Iterator[P]) { + func(s iter.Iterator[P]) { if err := s.Close(); err != nil { - iter.closeErrs.Add(err) + m.closeErrs.Add(err) } }) - return iter + return m } func (i *MergeIterator[P]) Next() bool { @@ -74,7 +74,7 @@ func (i *MergeIterator[P]) Next() bool { return true } if next.At().Timestamp() != i.current.Timestamp() || - phlaremodel.CompareLabelPairs(next.At().Labels(), i.current.Labels()) != 0 { + CompareLabelPairs(next.At().Labels(), i.current.Labels()) != 0 { i.current = next.At() return true } @@ -97,11 +97,11 @@ func (i *MergeIterator[P]) Close() error { } type TimeRangedIterator[T Timestamp] struct { - Iterator[T] + iter.Iterator[T] min, max model.Time } -func NewTimeRangedIterator[T Timestamp](it Iterator[T], min, max model.Time) Iterator[T] { +func NewTimeRangedIterator[T Timestamp](it iter.Iterator[T], min, max model.Time) iter.Iterator[T] { return &TimeRangedIterator[T]{ Iterator: it, min: min, diff --git a/pkg/iter/profiles_test.go b/pkg/model/profiles_test.go similarity index 86% rename from pkg/iter/profiles_test.go rename to pkg/model/profiles_test.go index 22701efa27..185b81bca1 100644 --- a/pkg/iter/profiles_test.go +++ b/pkg/model/profiles_test.go @@ -1,4 +1,4 @@ -package iter +package model import ( "math" @@ -8,21 +8,21 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/goleak" - phlaremodel "github.com/grafana/pyroscope/pkg/model" + "github.com/grafana/pyroscope/pkg/iter" ) var ( - aLabels = phlaremodel.LabelsFromStrings("foo", "a") - bLabels = phlaremodel.LabelsFromStrings("foo", "b") - cLabels = phlaremodel.LabelsFromStrings("foo", "c") + aLabels = LabelsFromStrings("foo", "a") + bLabels = LabelsFromStrings("foo", "b") + cLabels = LabelsFromStrings("foo", "c") ) type profile struct { - labels phlaremodel.Labels + labels Labels timestamp model.Time } -func (p profile) Labels() phlaremodel.Labels { +func (p profile) Labels() Labels { return p.labels } @@ -120,9 +120,9 @@ func TestMergeIterator(t *testing.T) { } { tt := tt t.Run(tt.name, func(t *testing.T) { - iters := make([]Iterator[profile], len(tt.input)) + iters := make([]iter.Iterator[profile], len(tt.input)) for i, input := range tt.input { - iters[i] = NewSliceIterator(input) + iters[i] = iter.NewSliceIterator(input) } it := NewMergeIterator( profile{timestamp: math.MaxInt64}, @@ -162,9 +162,9 @@ func Test_BufferedIterator(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - actual, err := Slice( - NewBufferedIterator( - NewSliceIterator(tc.in), tc.size), + actual, err := iter.Slice( + iter.NewBufferedIterator( + iter.NewSliceIterator(tc.in), tc.size), ) require.NoError(t, err) require.Equal(t, tc.in, actual) @@ 
-175,8 +175,8 @@ func Test_BufferedIterator(t *testing.T) { func Test_BufferedIteratorClose(t *testing.T) { defer goleak.VerifyNone(t, goleak.IgnoreCurrent()) - it := NewBufferedIterator( - NewSliceIterator(generatesProfiles(t, 100)), 10) + it := iter.NewBufferedIterator( + iter.NewSliceIterator(generatesProfiles(t, 100)), 10) require.NoError(t, it.Close()) } diff --git a/pkg/model/time_series.go b/pkg/model/time_series.go new file mode 100644 index 0000000000..b8d7182c5b --- /dev/null +++ b/pkg/model/time_series.go @@ -0,0 +1,200 @@ +package model + +import ( + "math" + "sort" + + "github.com/prometheus/common/model" + "github.com/samber/lo" + + typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" + "github.com/grafana/pyroscope/pkg/iter" +) + +type TimeSeriesValue struct { + Ts int64 + Lbs []*typesv1.LabelPair + LabelsHash uint64 + Value float64 +} + +func (p TimeSeriesValue) Labels() Labels { return p.Lbs } +func (p TimeSeriesValue) Timestamp() model.Time { return model.Time(p.Ts) } + +type TimeSeriesIterator struct { + point []*typesv1.Point + curr TimeSeriesValue +} + +func NewSeriesIterator(lbs []*typesv1.LabelPair, points []*typesv1.Point) *TimeSeriesIterator { + return &TimeSeriesIterator{ + point: points, + + curr: TimeSeriesValue{ + Lbs: lbs, + LabelsHash: Labels(lbs).Hash(), + }, + } +} + +func (s *TimeSeriesIterator) Next() bool { + if len(s.point) == 0 { + return false + } + p := s.point[0] + s.point = s.point[1:] + s.curr.Ts = p.Timestamp + s.curr.Value = p.Value + return true +} + +func (s *TimeSeriesIterator) At() TimeSeriesValue { return s.curr } +func (s *TimeSeriesIterator) Err() error { return nil } +func (s *TimeSeriesIterator) Close() error { return nil } + +func NewTimeSeriesMergeIterator(series []*typesv1.Series) iter.Iterator[TimeSeriesValue] { + iters := make([]iter.Iterator[TimeSeriesValue], 0, len(series)) + for _, s := range series { + iters = append(iters, NewSeriesIterator(s.Labels, s.Points)) + } + return NewMergeIterator(TimeSeriesValue{Ts: math.MaxInt64}, false, iters...) 
+} + +type TimeSeriesAggregator interface { + Add(ts int64, value float64) + GetAndReset() *typesv1.Point + IsEmpty() bool + GetTimestamp() int64 +} + +func NewTimeSeriesAggregator(aggregation *typesv1.TimeSeriesAggregationType) TimeSeriesAggregator { + if aggregation == nil { + return &sumTimeSeriesAggregator{ts: -1} + } + if *aggregation == typesv1.TimeSeriesAggregationType_TIME_SERIES_AGGREGATION_TYPE_AVERAGE { + return &avgTimeSeriesAggregator{ts: -1} + } + return &sumTimeSeriesAggregator{ts: -1} +} + +type sumTimeSeriesAggregator struct { + ts int64 + sum float64 +} + +func (a *sumTimeSeriesAggregator) Add(ts int64, value float64) { + a.ts = ts + a.sum += value +} + +func (a *sumTimeSeriesAggregator) GetAndReset() *typesv1.Point { + tsCopy := a.ts + sumCopy := a.sum + a.ts = -1 + a.sum = 0 + return &typesv1.Point{ + Timestamp: tsCopy, + Value: sumCopy, + } +} + +func (a *sumTimeSeriesAggregator) IsEmpty() bool { return a.ts == -1 } +func (a *sumTimeSeriesAggregator) GetTimestamp() int64 { return a.ts } + +type avgTimeSeriesAggregator struct { + ts int64 + sum float64 + count int64 +} + +func (a *avgTimeSeriesAggregator) Add(ts int64, value float64) { + a.ts = ts + a.sum += value + a.count++ +} + +func (a *avgTimeSeriesAggregator) GetAndReset() *typesv1.Point { + avg := a.sum / float64(a.count) + tsCopy := a.ts + a.ts = -1 + a.sum = 0 + a.count = 0 + return &typesv1.Point{ + Timestamp: tsCopy, + Value: avg, + } +} + +func (a *avgTimeSeriesAggregator) IsEmpty() bool { return a.ts == -1 } +func (a *avgTimeSeriesAggregator) GetTimestamp() int64 { return a.ts } + +// RangeSeries aggregates profiles into series. +// Series contains points spaced by step from start to end. +// Profiles from the same step are aggregated into one point. +func RangeSeries(it iter.Iterator[TimeSeriesValue], start, end, step int64, aggregation *typesv1.TimeSeriesAggregationType) []*typesv1.Series { + defer it.Close() + seriesMap := make(map[uint64]*typesv1.Series) + aggregators := make(map[uint64]TimeSeriesAggregator) + + if !it.Next() { + return nil + } + + // advance from the start to the end, adding each step results to the map. +Outer: + for currentStep := start; currentStep <= end; currentStep += step { + for { + aggregator, ok := aggregators[it.At().LabelsHash] + if !ok { + aggregator = NewTimeSeriesAggregator(aggregation) + aggregators[it.At().LabelsHash] = aggregator + } + if it.At().Ts > currentStep { + if !aggregator.IsEmpty() { + series := seriesMap[it.At().LabelsHash] + series.Points = append(series.Points, aggregator.GetAndReset()) + } + break // no more profiles for the currentStep + } + // find or create series + series, ok := seriesMap[it.At().LabelsHash] + if !ok { + seriesMap[it.At().LabelsHash] = &typesv1.Series{ + Labels: it.At().Lbs, + Points: []*typesv1.Point{}, + } + aggregator.Add(currentStep, it.At().Value) + if !it.Next() { + break Outer + } + continue + } + // Aggregate point if it is in the current step. 
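+ // Otherwise the aggregator still holds values from an earlier step:
+ // flush them as a point before starting to aggregate the current one.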
+ if aggregator.GetTimestamp() == currentStep { + aggregator.Add(currentStep, it.At().Value) + if !it.Next() { + break Outer + } + continue + } + // Next step is missing + if !aggregator.IsEmpty() { + series.Points = append(series.Points, aggregator.GetAndReset()) + } + aggregator.Add(currentStep, it.At().Value) + if !it.Next() { + break Outer + } + } + } + for lblHash, aggregator := range aggregators { + if !aggregator.IsEmpty() { + seriesMap[lblHash].Points = append(seriesMap[lblHash].Points, aggregator.GetAndReset()) + } + } + series := lo.Values(seriesMap) + sort.Slice(series, func(i, j int) bool { + return CompareLabelPairs(series[i].Labels, series[j].Labels) < 0 + }) + return series +} diff --git a/pkg/model/time_series_builder.go b/pkg/model/time_series_builder.go new file mode 100644 index 0000000000..9d8b070fae --- /dev/null +++ b/pkg/model/time_series_builder.go @@ -0,0 +1,77 @@ +package model + +import ( + "sort" + + "github.com/prometheus/common/model" + "github.com/samber/lo" + + typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" +) + +type TimeSeriesBuilder struct { + labelsByFingerprint map[model.Fingerprint]string + labelBuf []byte + by []string + + series seriesByLabels +} + +func NewTimeSeriesBuilder(by ...string) *TimeSeriesBuilder { + var b TimeSeriesBuilder + b.Init(by...) + return &b +} + +func (s *TimeSeriesBuilder) Init(by ...string) { + s.labelsByFingerprint = map[model.Fingerprint]string{} + s.series = make(seriesByLabels) + s.labelBuf = make([]byte, 0, 1024) + s.by = by +} + +func (s *TimeSeriesBuilder) Add(fp model.Fingerprint, lbs Labels, ts int64, value float64) { + labelsByString, ok := s.labelsByFingerprint[fp] + if !ok { + s.labelBuf = lbs.BytesWithLabels(s.labelBuf, s.by...) + labelsByString = string(s.labelBuf) + s.labelsByFingerprint[fp] = labelsByString + if _, ok := s.series[labelsByString]; !ok { + s.series[labelsByString] = &typesv1.Series{ + Labels: lbs.WithLabels(s.by...), + Points: []*typesv1.Point{ + { + Timestamp: ts, + Value: value, + }, + }, + } + return + } + } + series := s.series[labelsByString] + series.Points = append(series.Points, &typesv1.Point{ + Timestamp: ts, + Value: value, + }) +} + +func (s *TimeSeriesBuilder) Build() []*typesv1.Series { + return s.series.normalize() +} + +type seriesByLabels map[string]*typesv1.Series + +func (m seriesByLabels) normalize() []*typesv1.Series { + result := lo.Values(m) + sort.Slice(result, func(i, j int) bool { + return CompareLabelPairs(result[i].Labels, result[j].Labels) < 0 + }) + // we have to sort the points in each series because labels reduction may have changed the order + for _, s := range result { + sort.Slice(s.Points, func(i, j int) bool { + return s.Points[i].Timestamp < s.Points[j].Timestamp + }) + } + return result +} diff --git a/pkg/model/series.go b/pkg/model/time_series_merger.go similarity index 71% rename from pkg/model/series.go rename to pkg/model/time_series_merger.go index 5de5a8f2dd..dee074eecf 100644 --- a/pkg/model/series.go +++ b/pkg/model/time_series_merger.go @@ -8,34 +8,34 @@ import ( ) func MergeSeries(aggregation *typesv1.TimeSeriesAggregationType, series ...[]*typesv1.Series) []*typesv1.Series { - var m *SeriesMerger + var m *TimeSeriesMerger if aggregation == nil || *aggregation == typesv1.TimeSeriesAggregationType_TIME_SERIES_AGGREGATION_TYPE_SUM { - m = NewSeriesMerger(true) + m = NewTimeSeriesMerger(true) } else { - m = NewSeriesMerger(false) + m = NewTimeSeriesMerger(false) } for _, s := range series { - m.MergeSeries(s) + m.MergeTimeSeries(s) 
} - return m.Series() + return m.TimeSeries() } -type SeriesMerger struct { +type TimeSeriesMerger struct { mu sync.Mutex series map[uint64]*typesv1.Series sum bool } -// NewSeriesMerger creates a new series merger. If sum is set, samples +// NewTimeSeriesMerger creates a new series merger. If sum is set, samples // with matching timestamps are summed, otherwise duplicates are retained. -func NewSeriesMerger(sum bool) *SeriesMerger { - return &SeriesMerger{ +func NewTimeSeriesMerger(sum bool) *TimeSeriesMerger { + return &TimeSeriesMerger{ series: make(map[uint64]*typesv1.Series), sum: sum, } } -func (m *SeriesMerger) MergeSeries(s []*typesv1.Series) { +func (m *TimeSeriesMerger) MergeTimeSeries(s []*typesv1.Series) { m.mu.Lock() defer m.mu.Unlock() for _, x := range s { @@ -49,7 +49,11 @@ func (m *SeriesMerger) MergeSeries(s []*typesv1.Series) { } } -func (m *SeriesMerger) Series() []*typesv1.Series { +func (m *TimeSeriesMerger) IsEmpty() bool { + return len(m.series) == 0 +} + +func (m *TimeSeriesMerger) TimeSeries() []*typesv1.Series { if len(m.series) == 0 { return nil } @@ -66,7 +70,7 @@ func (m *SeriesMerger) Series() []*typesv1.Series { return r } -func (m *SeriesMerger) mergePoints(points []*typesv1.Point) int { +func (m *TimeSeriesMerger) mergePoints(points []*typesv1.Point) int { l := len(points) if l < 2 { return l diff --git a/pkg/model/series_test.go b/pkg/model/time_series_merger_test.go similarity index 100% rename from pkg/model/series_test.go rename to pkg/model/time_series_merger_test.go diff --git a/pkg/model/time_series_test.go b/pkg/model/time_series_test.go new file mode 100644 index 0000000000..6088b45166 --- /dev/null +++ b/pkg/model/time_series_test.go @@ -0,0 +1,154 @@ +package model + +import ( + "testing" + + typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" + "github.com/grafana/pyroscope/pkg/iter" + "github.com/grafana/pyroscope/pkg/testhelper" +) + +func Test_RangeSeriesSum(t *testing.T) { + seriesA := NewLabelsBuilder(nil).Set("foo", "bar").Labels() + seriesB := NewLabelsBuilder(nil).Set("foo", "buzz").Labels() + for _, tc := range []struct { + name string + in []TimeSeriesValue + out []*typesv1.Series + }{ + { + name: "single series", + in: []TimeSeriesValue{ + {Ts: 1, Value: 1}, + {Ts: 1, Value: 1}, + {Ts: 2, Value: 2}, + {Ts: 3, Value: 3}, + {Ts: 4, Value: 4}, + {Ts: 5, Value: 5}, + }, + out: []*typesv1.Series{ + { + Points: []*typesv1.Point{ + {Timestamp: 1, Value: 2}, + {Timestamp: 2, Value: 2}, + {Timestamp: 3, Value: 3}, + {Timestamp: 4, Value: 4}, + {Timestamp: 5, Value: 5}, + }, + }, + }, + }, + { + name: "multiple series", + in: []TimeSeriesValue{ + {Ts: 1, Value: 1, Lbs: seriesA, LabelsHash: seriesA.Hash()}, + {Ts: 1, Value: 1, Lbs: seriesB, LabelsHash: seriesB.Hash()}, + {Ts: 2, Value: 1, Lbs: seriesA, LabelsHash: seriesA.Hash()}, + {Ts: 3, Value: 1, Lbs: seriesB, LabelsHash: seriesB.Hash()}, + {Ts: 3, Value: 1, Lbs: seriesB, LabelsHash: seriesB.Hash()}, + {Ts: 4, Value: 4, Lbs: seriesB, LabelsHash: seriesB.Hash()}, + {Ts: 4, Value: 4, Lbs: seriesB, LabelsHash: seriesB.Hash()}, + {Ts: 4, Value: 4, Lbs: seriesA, LabelsHash: seriesA.Hash()}, + {Ts: 5, Value: 5, Lbs: seriesA, LabelsHash: seriesA.Hash()}, + }, + out: []*typesv1.Series{ + { + Labels: seriesA, + Points: []*typesv1.Point{ + {Timestamp: 1, Value: 1}, + {Timestamp: 2, Value: 1}, + {Timestamp: 4, Value: 4}, + {Timestamp: 5, Value: 5}, + }, + }, + { + Labels: seriesB, + Points: []*typesv1.Point{ + {Timestamp: 1, Value: 1}, + {Timestamp: 3, Value: 2}, + {Timestamp: 4, Value: 
8}, + }, + }, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + in := iter.NewSliceIterator(tc.in) + out := RangeSeries(in, 1, 5, 1, nil) + testhelper.EqualProto(t, tc.out, out) + }) + } +} + +func Test_RangeSeriesAvg(t *testing.T) { + seriesA := NewLabelsBuilder(nil).Set("foo", "bar").Labels() + seriesB := NewLabelsBuilder(nil).Set("foo", "buzz").Labels() + for _, tc := range []struct { + name string + in []TimeSeriesValue + out []*typesv1.Series + }{ + { + name: "single series", + in: []TimeSeriesValue{ + {Ts: 1, Value: 1}, + {Ts: 1, Value: 2}, + {Ts: 2, Value: 2}, + {Ts: 2, Value: 3}, + {Ts: 3, Value: 4}, + {Ts: 4, Value: 5}, + }, + out: []*typesv1.Series{ + { + Points: []*typesv1.Point{ + {Timestamp: 1, Value: 1.5}, // avg of 1 and 2 + {Timestamp: 2, Value: 2.5}, // avg of 2 and 3 + {Timestamp: 3, Value: 4}, + {Timestamp: 4, Value: 5}, + }, + }, + }, + }, + { + name: "multiple series", + in: []TimeSeriesValue{ + {Ts: 1, Value: 1, Lbs: seriesA, LabelsHash: seriesA.Hash()}, + {Ts: 1, Value: 1, Lbs: seriesB, LabelsHash: seriesB.Hash()}, + {Ts: 2, Value: 1, Lbs: seriesA, LabelsHash: seriesA.Hash()}, + {Ts: 2, Value: 2, Lbs: seriesA, LabelsHash: seriesA.Hash()}, + {Ts: 3, Value: 1, Lbs: seriesB, LabelsHash: seriesB.Hash()}, + {Ts: 3, Value: 2, Lbs: seriesB, LabelsHash: seriesB.Hash()}, + {Ts: 4, Value: 4, Lbs: seriesB, LabelsHash: seriesB.Hash()}, + {Ts: 4, Value: 6, Lbs: seriesB, LabelsHash: seriesB.Hash()}, + {Ts: 4, Value: 4, Lbs: seriesA, LabelsHash: seriesA.Hash()}, + {Ts: 5, Value: 5, Lbs: seriesA, LabelsHash: seriesA.Hash()}, + }, + out: []*typesv1.Series{ + { + Labels: seriesA, + Points: []*typesv1.Point{ + {Timestamp: 1, Value: 1}, + {Timestamp: 2, Value: 1.5}, // avg of 1 and 2 + {Timestamp: 4, Value: 4}, + {Timestamp: 5, Value: 5}, + }, + }, + { + Labels: seriesB, + Points: []*typesv1.Point{ + {Timestamp: 1, Value: 1}, + {Timestamp: 3, Value: 1.5}, // avg of 1 and 2 + {Timestamp: 4, Value: 5}, // avg of 4 and 6 + }, + }, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + in := iter.NewSliceIterator(tc.in) + aggregation := typesv1.TimeSeriesAggregationType_TIME_SERIES_AGGREGATION_TYPE_AVERAGE + out := RangeSeries(in, 1, 5, 1, &aggregation) + testhelper.EqualProto(t, tc.out, out) + }) + } +} diff --git a/pkg/model/tree.go b/pkg/model/tree.go index 613fd47a27..46c5201d8f 100644 --- a/pkg/model/tree.go +++ b/pkg/model/tree.go @@ -6,7 +6,6 @@ import ( "io" "sort" "strings" - "sync" dvarint "github.com/dennwc/varint" "github.com/xlab/treeprint" @@ -375,6 +374,17 @@ var errMalformedTreeBytes = fmt.Errorf("malformed tree bytes") const estimateBytesPerNode = 16 // Chosen empirically. +func MustUnmarshalTree(b []byte) *Tree { + if len(b) == 0 { + return new(Tree) + } + t, err := UnmarshalTree(b) + if err != nil { + panic(err) + } + return t +} + func UnmarshalTree(b []byte) (*Tree, error) { t := new(Tree) if len(b) < 2 { @@ -432,41 +442,3 @@ func UnmarshalTree(b []byte) (*Tree, error) { return t, nil } - -type TreeMerger struct { - mu sync.Mutex - t *Tree -} - -func NewTreeMerger() *TreeMerger { - return new(TreeMerger) -} - -func (m *TreeMerger) MergeTree(t *Tree) { - m.mu.Lock() - defer m.mu.Unlock() - if m.t != nil { - m.t.Merge(t) - } else { - m.t = t - } -} - -func (m *TreeMerger) MergeTreeBytes(b []byte) error { - // TODO(kolesnikovae): Ideally, we should not have - // the intermediate tree t but update m.t reading - // raw bytes b directly. 
- t, err := UnmarshalTree(b) - if err != nil { - return err - } - m.MergeTree(t) - return nil -} - -func (m *TreeMerger) Tree() *Tree { - if m.t == nil { - return new(Tree) - } - return m.t -} diff --git a/pkg/model/tree_merger.go b/pkg/model/tree_merger.go new file mode 100644 index 0000000000..fa75d891b5 --- /dev/null +++ b/pkg/model/tree_merger.go @@ -0,0 +1,47 @@ +package model + +import ( + "sync" +) + +type TreeMerger struct { + mu sync.Mutex + t *Tree +} + +func NewTreeMerger() *TreeMerger { + return new(TreeMerger) +} + +func (m *TreeMerger) MergeTree(t *Tree) { + m.mu.Lock() + defer m.mu.Unlock() + if m.t != nil { + m.t.Merge(t) + } else { + m.t = t + } +} + +func (m *TreeMerger) MergeTreeBytes(b []byte) error { + // TODO(kolesnikovae): Ideally, we should not have + // the intermediate tree t but update m.t reading + // raw bytes b directly. + t, err := UnmarshalTree(b) + if err != nil { + return err + } + m.MergeTree(t) + return nil +} + +func (m *TreeMerger) Tree() *Tree { + if m.t == nil { + return new(Tree) + } + return m.t +} + +func (m *TreeMerger) IsEmpty() bool { + return m.t == nil +} diff --git a/pkg/objstore/providers/memory/bucket_client.go b/pkg/objstore/providers/memory/bucket_client.go new file mode 100644 index 0000000000..5b20aefa70 --- /dev/null +++ b/pkg/objstore/providers/memory/bucket_client.go @@ -0,0 +1,230 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package memory + +import ( + "bytes" + "context" + "errors" + "io" + "sort" + "strings" + "sync" + "time" + + "github.com/thanos-io/objstore" +) + +var errNotFound = errors.New("inmem: object not found") + +// InMemBucket implements the objstore.Bucket interfaces against local memory. +// Methods from Bucket interface are thread-safe. Objects are assumed to be immutable. +type InMemBucket struct { + mtx sync.RWMutex + objects map[string][]byte + attrs map[string]objstore.ObjectAttributes +} + +// NewInMemBucket returns a new in memory Bucket. +// NOTE: Returned bucket is just a naive in memory bucket implementation. For test use cases only. +func NewInMemBucket() *InMemBucket { + return &InMemBucket{ + objects: map[string][]byte{}, + attrs: map[string]objstore.ObjectAttributes{}, + } +} + +// Objects returns a copy of the internally stored objects. +// NOTE: For assert purposes. +func (b *InMemBucket) Objects() map[string][]byte { + b.mtx.RLock() + defer b.mtx.RUnlock() + + objs := make(map[string][]byte) + for k, v := range b.objects { + objs[k] = v + } + + return objs +} + +// Iter calls f for each entry in the given directory. The argument to f is the full +// object name including the prefix of the inspected directory. +func (b *InMemBucket) Iter(_ context.Context, dir string, f func(string) error, options ...objstore.IterOption) error { + unique := map[string]struct{}{} + params := objstore.ApplyIterOptions(options...) + + var dirPartsCount int + dirParts := strings.SplitAfter(dir, objstore.DirDelim) + for _, p := range dirParts { + if p == "" { + continue + } + dirPartsCount++ + } + + b.mtx.RLock() + for filename := range b.objects { + if !strings.HasPrefix(filename, dir) || dir == filename { + continue + } + + if params.Recursive { + // Any object matching the prefix should be included. 
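+ // No directory collapsing is needed when iterating recursively.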
+ unique[filename] = struct{}{} + continue + } + + parts := strings.SplitAfter(filename, objstore.DirDelim) + unique[strings.Join(parts[:dirPartsCount+1], "")] = struct{}{} + } + b.mtx.RUnlock() + + var keys []string + for n := range unique { + keys = append(keys, n) + } + sort.Slice(keys, func(i, j int) bool { + if strings.HasSuffix(keys[i], objstore.DirDelim) && strings.HasSuffix(keys[j], objstore.DirDelim) { + return strings.Compare(keys[i], keys[j]) < 0 + } + if strings.HasSuffix(keys[i], objstore.DirDelim) { + return false + } + if strings.HasSuffix(keys[j], objstore.DirDelim) { + return true + } + + return strings.Compare(keys[i], keys[j]) < 0 + }) + + for _, k := range keys { + if err := f(k); err != nil { + return err + } + } + return nil +} + +// Get returns a reader for the given object name. +func (b *InMemBucket) Get(_ context.Context, name string) (io.ReadCloser, error) { + if name == "" { + return nil, errors.New("inmem: object name is empty") + } + + b.mtx.RLock() + file, ok := b.objects[name] + b.mtx.RUnlock() + if !ok { + return nil, errNotFound + } + + return io.NopCloser(bytes.NewReader(file)), nil +} + +// GetRange returns a new range reader for the given object name and range. +func (b *InMemBucket) GetRange(_ context.Context, name string, off, length int64) (io.ReadCloser, error) { + if name == "" { + return nil, errors.New("inmem: object name is empty") + } + + b.mtx.RLock() + file, ok := b.objects[name] + b.mtx.RUnlock() + if !ok { + return nil, errNotFound + } + + if int64(len(file)) < off { + return io.NopCloser(bytes.NewReader(nil)), nil + } + + if length == 0 { + return io.NopCloser(bytes.NewReader(nil)), nil + } + if length == -1 { + return io.NopCloser(bytes.NewReader(file[off:])), nil + } + + if int64(len(file)) <= off+length { + // Just return maximum of what we have. + length = int64(len(file)) - off + } + + return io.NopCloser(bytes.NewReader(file[off : off+length])), nil +} + +// Exists checks if the given directory exists in memory. +func (b *InMemBucket) Exists(_ context.Context, name string) (bool, error) { + b.mtx.RLock() + defer b.mtx.RUnlock() + _, ok := b.objects[name] + return ok, nil +} + +// Attributes returns information about the specified object. +func (b *InMemBucket) Attributes(_ context.Context, name string) (objstore.ObjectAttributes, error) { + b.mtx.RLock() + attrs, ok := b.attrs[name] + b.mtx.RUnlock() + if !ok { + return objstore.ObjectAttributes{}, errNotFound + } + return attrs, nil +} + +// Upload writes the file specified in src to into the memory. +func (b *InMemBucket) Upload(_ context.Context, name string, r io.Reader) error { + b.mtx.Lock() + defer b.mtx.Unlock() + body, err := io.ReadAll(r) + if err != nil { + return err + } + b.objects[name] = body + b.attrs[name] = objstore.ObjectAttributes{ + Size: int64(len(body)), + LastModified: time.Now(), + } + return nil +} + +// Delete removes all data prefixed with the dir. +func (b *InMemBucket) Delete(_ context.Context, name string) error { + b.mtx.Lock() + defer b.mtx.Unlock() + if _, ok := b.objects[name]; !ok { + return errNotFound + } + delete(b.objects, name) + delete(b.attrs, name) + return nil +} + +// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations. +func (b *InMemBucket) IsObjNotFoundErr(err error) bool { + return errors.Is(err, errNotFound) +} + +// IsAccessDeniedErr returns true if access to object is denied. 
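+// The in-memory bucket never denies access, so it always returns false.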
+func (b *InMemBucket) IsAccessDeniedErr(err error) bool { + return false +} + +func (b *InMemBucket) Close() error { return nil } + +// Name returns the bucket name. +func (b *InMemBucket) Name() string { + return "inmem" +} + +func (b *InMemBucket) Set(name string, data []byte) { + b.mtx.Lock() + defer b.mtx.Unlock() + b.objects[name] = data + b.attrs[name] = objstore.ObjectAttributes{ + Size: int64(len(data)), + LastModified: time.Now(), + } +} diff --git a/pkg/objstore/read_only_file.go b/pkg/objstore/read_only_file.go new file mode 100644 index 0000000000..350010262a --- /dev/null +++ b/pkg/objstore/read_only_file.go @@ -0,0 +1,179 @@ +package objstore + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "sync" + "time" + + "github.com/grafana/dskit/multierror" + "github.com/thanos-io/objstore" + + "github.com/grafana/pyroscope/pkg/util/bufferpool" +) + +type ReadOnlyFile struct { + size int64 + name string + path string + mu sync.Mutex + readers []*fileReader +} + +func Download(ctx context.Context, name string, src BucketReader, dir string) (*ReadOnlyFile, error) { + f, err := download(ctx, name, src, dir) + if err != nil { + return nil, fmt.Errorf("downloading %s: %w", name, err) + } + return f, nil +} + +func download(ctx context.Context, name string, src BucketReader, dir string) (f *ReadOnlyFile, err error) { + r, err := src.Get(ctx, name) + if err != nil { + return nil, err + } + f = &ReadOnlyFile{ + size: 0, + name: name, + path: filepath.Join(dir, filepath.Base(name)), + } + defer func() { + if err != nil { + _ = f.Close() + _ = r.Close() + } + }() + if err = os.MkdirAll(dir, 0755); err != nil { + return nil, err + } + dst, err := os.Create(f.path) + if err != nil { + return nil, err + } + buf := bufferpool.GetBuffer(32 << 10) + defer bufferpool.Put(buf) + buf.B = buf.B[:cap(buf.B)] + n, err := io.CopyBuffer(dst, r, buf.B) + if err != nil { + return nil, err + } + f.size = n + return f, nil +} + +func (f *ReadOnlyFile) Close() error { + var m multierror.MultiError + for _, r := range f.readers { + m.Add(r.Close()) + } + m.Add(os.RemoveAll(f.path)) + f.readers = f.readers[:0] + return m.Err() +} + +func (f *ReadOnlyFile) Iter(context.Context, string, func(string) error, ...objstore.IterOption) error { + return nil +} + +func (f *ReadOnlyFile) Exists(_ context.Context, name string) (bool, error) { + return name == f.name, nil +} + +func (f *ReadOnlyFile) IsObjNotFoundErr(err error) bool { return os.IsNotExist(err) } + +func (f *ReadOnlyFile) IsAccessDeniedErr(err error) bool { return os.IsPermission(err) } + +func (f *ReadOnlyFile) Attributes(_ context.Context, name string) (attrs objstore.ObjectAttributes, err error) { + if name != f.name { + return attrs, os.ErrNotExist + } + return objstore.ObjectAttributes{ + Size: f.size, + LastModified: time.Unix(0, 0), // We don't care. 
+ }, nil +} + +func (f *ReadOnlyFile) ReaderAt(_ context.Context, name string) (ReaderAtCloser, error) { + return f.borrowOrCreateReader(name) +} + +func (f *ReadOnlyFile) Get(_ context.Context, name string) (io.ReadCloser, error) { + r, err := f.borrowOrCreateReader(name) + if err != nil { + return nil, err + } + if _, err = r.Seek(0, io.SeekStart); err != nil { + _ = r.Close() + return nil, err + } + return r, nil +} + +func (f *ReadOnlyFile) GetRange(_ context.Context, name string, off, length int64) (io.ReadCloser, error) { + if off < 0 || length < 0 { + return nil, fmt.Errorf("%w: invalid offset", os.ErrInvalid) + } + r, err := f.borrowOrCreateReader(name) + if err != nil { + return nil, err + } + if _, err = r.Seek(off, io.SeekStart); err != nil { + _ = r.Close() + return nil, err + } + r.reader = io.LimitReader(r.reader, length) + return r, nil +} + +func (f *ReadOnlyFile) borrowOrCreateReader(name string) (*fileReader, error) { + if name != f.name { + return nil, os.ErrNotExist + } + f.mu.Lock() + defer f.mu.Unlock() + if len(f.readers) > 0 { + ff := f.readers[len(f.readers)-1] + f.readers = f.readers[:len(f.readers)-1] + ff.reader = ff.File + return ff, nil + } + return f.openReader() +} + +func (f *ReadOnlyFile) returnReader(r *fileReader) { + f.mu.Lock() + defer f.mu.Unlock() + f.readers = append(f.readers, r) +} + +func (f *ReadOnlyFile) openReader() (*fileReader, error) { + ff, err := os.Open(f.path) + if err != nil { + return nil, err + } + return &fileReader{ + parent: f, + File: ff, + reader: ff, + }, nil +} + +type fileReader struct { + parent *ReadOnlyFile + reader io.Reader + *os.File +} + +func (r *fileReader) Close() error { + r.reader = nil + r.parent.returnReader(r) + return nil +} + +func (r *fileReader) Read(p []byte) (int, error) { + return r.reader.Read(p) +} diff --git a/pkg/objstore/reader.go b/pkg/objstore/reader.go index 30cb96a6e1..5fa13d79c2 100644 --- a/pkg/objstore/reader.go +++ b/pkg/objstore/reader.go @@ -2,6 +2,7 @@ package objstore import ( "context" + "fmt" "io" "github.com/thanos-io/objstore" @@ -91,3 +92,47 @@ func (b *ReaderAt) ReadAt(p []byte, off int64) (int, error) { func (b *ReaderAt) Close() error { return nil } + +func ReadRange(ctx context.Context, reader io.ReaderFrom, name string, storage objstore.BucketReader, off, size int64) error { + if size == 0 { + attrs, err := storage.Attributes(ctx, name) + if err != nil { + return err + } + size = attrs.Size + } + if size == 0 { + return nil + } + rc, err := storage.GetRange(ctx, name, off, size) + if err != nil { + return err + } + defer func() { + _ = rc.Close() + }() + n, err := reader.ReadFrom(io.LimitReader(rc, size)) + if err != nil { + return err + } + if n != size { + return fmt.Errorf("read %d bytes, expected %d", n, size) + } + return nil +} + +type BucketReaderWithOffset struct { + BucketReader + offset int64 +} + +func NewBucketReaderWithOffset(r BucketReader, offset int64) *BucketReaderWithOffset { + return &BucketReaderWithOffset{ + BucketReader: r, + offset: offset, + } +} + +func (r *BucketReaderWithOffset) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { + return r.BucketReader.GetRange(ctx, name, r.offset+off, length) +} diff --git a/pkg/phlaredb/block_querier.go b/pkg/phlaredb/block_querier.go index e5d45b5e35..1628dcc9ad 100644 --- a/pkg/phlaredb/block_querier.go +++ b/pkg/phlaredb/block_querier.go @@ -633,7 +633,7 @@ func (queriers Queriers) SelectMatchingProfiles(ctx context.Context, params *ing if err != nil { return nil, err } - 
return iter.NewMergeIterator(maxBlockProfile, true, iters...), nil + return phlaremodel.NewMergeIterator(maxBlockProfile, true, iters...), nil } func (queriers Queriers) LabelValues(ctx context.Context, req *connect.Request[typesv1.LabelValuesRequest]) (*connect.Response[typesv1.LabelValuesResponse], error) { @@ -1611,7 +1611,7 @@ func (b *singleBlockQuerier) SelectMatchingProfiles(ctx context.Context, params iters = append(iters, iter.NewSliceIterator(currentSeriesSlice)) } - return iter.NewMergeIterator(maxBlockProfile, false, iters...), nil + return phlaremodel.NewMergeIterator(maxBlockProfile, false, iters...), nil } func (b *singleBlockQuerier) SelectMergeByLabels( diff --git a/pkg/phlaredb/filter_profiles_bidi.go b/pkg/phlaredb/filter_profiles_bidi.go index 7cdf6dc131..64e185dc32 100644 --- a/pkg/phlaredb/filter_profiles_bidi.go +++ b/pkg/phlaredb/filter_profiles_bidi.go @@ -12,6 +12,7 @@ import ( ingestv1 "github.com/grafana/pyroscope/api/gen/proto/go/ingester/v1" "github.com/grafana/pyroscope/pkg/iter" + phlaremodel "github.com/grafana/pyroscope/pkg/model" ) type BidiServerMerge[Res any, Req any] interface { @@ -76,7 +77,7 @@ func filterProfiles[B BidiServerMerge[Res, Req], Res filterResponse, Req filterR querierIndex: i, } } - if err := iter.ReadBatch(ctx, iter.NewMergeIterator(ProfileWithIndex{ + if err := iter.ReadBatch(ctx, phlaremodel.NewMergeIterator(ProfileWithIndex{ Profile: maxBlockProfile, Index: 0, }, true, its...), batchProfileSize, func(ctx context.Context, batch []ProfileWithIndex) error { diff --git a/pkg/phlaredb/head.go b/pkg/phlaredb/head.go index e6efcbf7a0..24239e7489 100644 --- a/pkg/phlaredb/head.go +++ b/pkg/phlaredb/head.go @@ -327,6 +327,15 @@ func (h *Head) LabelNames(ctx context.Context, req *connect.Request[typesv1.Labe }), nil } +func (h *Head) MustProfileTypeNames() []string { + ptypes, err := h.profiles.index.ix.LabelValues(phlaremodel.LabelNameProfileType, nil) + if err != nil { + panic(err) + } + sort.Strings(ptypes) + return ptypes +} + // ProfileTypes returns the possible profile types. 
func (h *Head) ProfileTypes(ctx context.Context, req *connect.Request[ingestv1.ProfileTypesRequest]) (*connect.Response[ingestv1.ProfileTypesResponse], error) { values, err := h.profiles.index.ix.LabelValues(phlaremodel.LabelNameProfileType, nil) @@ -657,3 +666,11 @@ func (h *Head) GetMetaStats() block.MetaStats { defer h.metaLock.RUnlock() return h.meta.GetStats() } + +func (h *Head) Meta() *block.Meta { + return h.meta +} + +func (h *Head) LocalPathFor(relPath string) string { + return filepath.Join(h.localPath, relPath) +} diff --git a/pkg/phlaredb/head_queriers.go b/pkg/phlaredb/head_queriers.go index ac5233e45a..a39c9d6624 100644 --- a/pkg/phlaredb/head_queriers.go +++ b/pkg/phlaredb/head_queriers.go @@ -395,12 +395,12 @@ func (q *headInMemoryQuerier) SelectMatchingProfiles(ctx context.Context, params NewSeriesIterator( profileSeries.lbs, profileSeries.fp, - iter.NewTimeRangedIterator(iter.NewSliceIterator(profiles), start, end), + phlaremodel.NewTimeRangedIterator(iter.NewSliceIterator(profiles), start, end), ), ) } - return iter.NewMergeIterator(maxBlockProfile, false, iters...), nil + return phlaremodel.NewMergeIterator(maxBlockProfile, false, iters...), nil } func (q *headInMemoryQuerier) SelectMergeByStacktraces(ctx context.Context, params *ingestv1.SelectProfilesRequest, maxNodes int64) (*phlaremodel.Tree, error) { @@ -598,16 +598,14 @@ func (q *headInMemoryQuerier) MergeByLabels( sp, _ := opentracing.StartSpanFromContext(ctx, "MergeByLabels - HeadInMemory") defer sp.Finish() - seriesBuilder := seriesBuilder{} - seriesBuilder.init(by...) - + seriesBuilder := phlaremodel.NewTimeSeriesBuilder(by...) if len(sts.GetCallSite()) == 0 { for rows.Next() { p, ok := rows.At().(ProfileWithLabels) if !ok { return nil, errors.New("expected ProfileWithLabels") } - seriesBuilder.add(p.Fingerprint(), p.Labels(), int64(p.Timestamp()), float64(p.Total())) + seriesBuilder.Add(p.Fingerprint(), p.Labels(), int64(p.Timestamp()), float64(p.Total())) } } else { r := symdb.NewResolver(ctx, q.head.symdb, @@ -622,7 +620,7 @@ func (q *headInMemoryQuerier) MergeByLabels( if err := r.CallSiteValues(&v, p.StacktracePartition(), p.Samples()); err != nil { return nil, err } - seriesBuilder.add(p.Fingerprint(), p.Labels(), int64(p.Timestamp()), float64(v.Total)) + seriesBuilder.Add(p.Fingerprint(), p.Labels(), int64(p.Timestamp()), float64(v.Total)) } } @@ -630,7 +628,7 @@ func (q *headInMemoryQuerier) MergeByLabels( return nil, err } - return seriesBuilder.build(), nil + return seriesBuilder.Build(), nil } func (q *headInMemoryQuerier) SelectMergeByLabels( @@ -654,8 +652,7 @@ func (q *headInMemoryQuerier) SelectMergeByLabels( start = model.Time(params.Start) end = model.Time(params.End) ) - seriesBuilder := seriesBuilder{} - seriesBuilder.init(by...) + seriesBuilder := phlaremodel.NewTimeSeriesBuilder(by...) 
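+ // The builder groups points into one series per distinct value set of the `by` labels.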
index.mutex.RLock() defer index.mutex.RUnlock() @@ -673,7 +670,7 @@ func (q *headInMemoryQuerier) SelectMergeByLabels( if p.Timestamp() > end { break } - seriesBuilder.add(fp, profileSeries.lbs, int64(p.Timestamp()), float64(p.Total())) + seriesBuilder.Add(fp, profileSeries.lbs, int64(p.Timestamp()), float64(p.Total())) } } } else { @@ -696,11 +693,11 @@ func (q *headInMemoryQuerier) SelectMergeByLabels( if err = r.CallSiteValues(&v, p.StacktracePartition, p.Samples); err != nil { return nil, err } - seriesBuilder.add(fp, profileSeries.lbs, int64(p.Timestamp()), float64(v.Total)) + seriesBuilder.Add(fp, profileSeries.lbs, int64(p.Timestamp()), float64(v.Total)) } } } - return seriesBuilder.build(), nil + return seriesBuilder.Build(), nil } func (q *headInMemoryQuerier) Series(ctx context.Context, params *ingestv1.SeriesRequest) ([]*typesv1.Labels, error) { diff --git a/pkg/phlaredb/querier.go b/pkg/phlaredb/querier.go index f8a286950a..c2bcb121fa 100644 --- a/pkg/phlaredb/querier.go +++ b/pkg/phlaredb/querier.go @@ -39,6 +39,7 @@ type IndexReader interface { // by the reference. // Returns storage.ErrNotFound if the ref does not resolve to a known series. Series(ref storage.SeriesRef, lset *phlaremodel.Labels, chks *[]index.ChunkMeta) (uint64, error) + SeriesBy(ref storage.SeriesRef, lset *phlaremodel.Labels, chks *[]index.ChunkMeta, by ...string) (uint64, error) // LabelNames returns all the unique label names present in the index in sorted order. LabelNames(matchers ...*labels.Matcher) ([]string, error) diff --git a/pkg/phlaredb/sample_merge.go b/pkg/phlaredb/sample_merge.go index 9d21e935d1..b06ceb4b11 100644 --- a/pkg/phlaredb/sample_merge.go +++ b/pkg/phlaredb/sample_merge.go @@ -2,14 +2,11 @@ package phlaredb import ( "context" - "sort" "strings" "github.com/grafana/dskit/runutil" "github.com/opentracing/opentracing-go" "github.com/parquet-go/parquet-go" - "github.com/prometheus/common/model" - "github.com/samber/lo" profilev1 "github.com/grafana/pyroscope/api/gen/proto/go/google/v1" typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" @@ -161,67 +158,6 @@ func mergeBySpans[T interface{ StacktracePartition() uint64 }](ctx context.Conte return profiles.Err() } -type seriesByLabels map[string]*typesv1.Series - -func (m seriesByLabels) normalize() []*typesv1.Series { - result := lo.Values(m) - sort.Slice(result, func(i, j int) bool { - return phlaremodel.CompareLabelPairs(result[i].Labels, result[j].Labels) < 0 - }) - // we have to sort the points in each series because labels reduction may have changed the order - for _, s := range result { - sort.Slice(s.Points, func(i, j int) bool { - return s.Points[i].Timestamp < s.Points[j].Timestamp - }) - } - return result -} - -type seriesBuilder struct { - labelsByFingerprint map[model.Fingerprint]string - labelBuf []byte - by []string - - series seriesByLabels -} - -func (s *seriesBuilder) init(by ...string) { - s.labelsByFingerprint = map[model.Fingerprint]string{} - s.series = make(seriesByLabels) - s.labelBuf = make([]byte, 0, 1024) - s.by = by -} - -func (s *seriesBuilder) add(fp model.Fingerprint, lbs phlaremodel.Labels, ts int64, value float64) { - labelsByString, ok := s.labelsByFingerprint[fp] - if !ok { - s.labelBuf = lbs.BytesWithLabels(s.labelBuf, s.by...) 
- labelsByString = string(s.labelBuf) - s.labelsByFingerprint[fp] = labelsByString - if _, ok := s.series[labelsByString]; !ok { - s.series[labelsByString] = &typesv1.Series{ - Labels: lbs.WithLabels(s.by...), - Points: []*typesv1.Point{ - { - Timestamp: ts, - Value: value, - }, - }, - } - return - } - } - series := s.series[labelsByString] - series.Points = append(series.Points, &typesv1.Point{ - Timestamp: ts, - Value: value, - }) -} - -func (s *seriesBuilder) build() []*typesv1.Series { - return s.series.normalize() -} - func mergeByLabels[T Profile]( ctx context.Context, profileSource Source, @@ -236,8 +172,7 @@ func mergeByLabels[T Profile]( profiles := query.NewRepeatedRowIterator(ctx, rows, profileSource.RowGroups(), column.ColumnIndex) defer runutil.CloseWithErrCapture(&err, profiles, "failed to close profile stream") - seriesBuilder := seriesBuilder{} - seriesBuilder.init(by...) + seriesBuilder := phlaremodel.NewTimeSeriesBuilder(by...) for profiles.Next() { values := profiles.At() @@ -246,10 +181,10 @@ func mergeByLabels[T Profile]( for _, e := range values.Values { total += e[0].Int64() } - seriesBuilder.add(p.Fingerprint(), p.Labels(), int64(p.Timestamp()), float64(total)) + seriesBuilder.Add(p.Fingerprint(), p.Labels(), int64(p.Timestamp()), float64(total)) } - return seriesBuilder.build(), profiles.Err() + return seriesBuilder.Build(), profiles.Err() } func mergeByLabelsWithStackTraceSelector[T Profile]( @@ -268,8 +203,8 @@ func mergeByLabelsWithStackTraceSelector[T Profile]( columns.Value.ColumnIndex, ) - seriesBuilder := seriesBuilder{} - seriesBuilder.init(by...) + seriesBuilder := phlaremodel.TimeSeriesBuilder{} + seriesBuilder.Init(by...) defer runutil.CloseWithErrCapture(&err, profiles, "failed to close profile stream") var v symdb.CallSiteValues @@ -279,8 +214,8 @@ func mergeByLabelsWithStackTraceSelector[T Profile]( if err = r.CallSiteValuesParquet(&v, h.StacktracePartition(), row.Values[0], row.Values[1]); err != nil { return nil, err } - seriesBuilder.add(h.Fingerprint(), h.Labels(), int64(h.Timestamp()), float64(v.Total)) + seriesBuilder.Add(h.Fingerprint(), h.Labels(), int64(h.Timestamp()), float64(v.Total)) } - return seriesBuilder.build(), profiles.Err() + return seriesBuilder.Build(), profiles.Err() } diff --git a/pkg/phlaredb/symdb/block_reader.go b/pkg/phlaredb/symdb/block_reader.go index 8561b71c07..861a063f82 100644 --- a/pkg/phlaredb/symdb/block_reader.go +++ b/pkg/phlaredb/symdb/block_reader.go @@ -20,6 +20,7 @@ import ( "github.com/grafana/pyroscope/pkg/objstore" "github.com/grafana/pyroscope/pkg/phlaredb/block" schemav1 "github.com/grafana/pyroscope/pkg/phlaredb/schemas/v1" + "github.com/grafana/pyroscope/pkg/util/bufferpool" "github.com/grafana/pyroscope/pkg/util/refctr" ) @@ -36,6 +37,83 @@ type Reader struct { meta *block.Meta files map[string]block.File parquetFiles *parquetFiles + + prefetchSize uint64 +} + +type Option func(*Reader) + +func WithPrefetchSize(size uint64) Option { + return func(r *Reader) { + r.prefetchSize = size + } +} + +func OpenObject(ctx context.Context, b objstore.BucketReader, name string, offset, size int64, options ...Option) (*Reader, error) { + f := block.File{ + RelPath: name, + SizeBytes: uint64(size), + } + r := &Reader{ + bucket: objstore.NewBucketReaderWithOffset(b, offset), + file: f, + } + for _, opt := range options { + opt(r) + } + + var err error + if r.prefetchSize > 0 { + err = r.openIndexWithPrefetch(ctx) + } else { + err = r.openIndex(ctx) + } + if err != nil { + return nil, fmt.Errorf("opening index 
section: %w", err) + } + + if err = r.buildPartitions(); err != nil { + return nil, err + } + + return r, nil +} + +func (r *Reader) openIndexWithPrefetch(ctx context.Context) (err error) { + prefetchSize := r.prefetchSize + if prefetchSize > r.file.SizeBytes { + prefetchSize = r.file.SizeBytes + } + n, err := r.prefetchIndex(ctx, prefetchSize) + if err == nil && n != 0 { + _, err = r.prefetchIndex(ctx, prefetchSize) + } + return err +} + +func (r *Reader) prefetchIndex(ctx context.Context, size uint64) (n uint64, err error) { + if size < uint64(FooterSize) { + size = uint64(FooterSize) + } + prefetchOffset := r.file.SizeBytes - size + buf := bufferpool.GetBuffer(int(size)) + defer bufferpool.Put(buf) + if err = objstore.ReadRange(ctx, buf, r.file.RelPath, r.bucket, int64(prefetchOffset), int64(size)); err != nil { + return 0, fmt.Errorf("fetching index: %w", err) + } + footerOffset := size - uint64(FooterSize) + if err = r.footer.UnmarshalBinary(buf.B[footerOffset:]); err != nil { + return 0, fmt.Errorf("unmarshaling footer: %w", err) + } + if prefetchOffset > (r.footer.IndexOffset) { + return r.file.SizeBytes - r.footer.IndexOffset, nil + } + // prefetch offset is less that or equal to the index offset. + indexOffset := r.footer.IndexOffset - prefetchOffset + if r.index, err = OpenIndex(buf.B[indexOffset:footerOffset]); err != nil { + return 0, fmt.Errorf("opening index: %w", err) + } + return 0, nil } func Open(ctx context.Context, b objstore.BucketReader, m *block.Meta) (*Reader, error) { @@ -113,13 +191,6 @@ func (r *Reader) partitionReader(h *PartitionHeader) (*partition, error) { // openIndex locates footer and loads the index section from // the file into the memory. -// -// NOTE(kolesnikovae): Pre-fetch: we could speculatively fetch -// the footer and the index section into a larger buffer rather -// than retrieving them synchronously. -// -// NOTE(kolesnikovae): It is possible to skip the footer, if it -// was cached, and the index section offset and size are known. func (r *Reader) openIndex(ctx context.Context) error { if r.file.SizeBytes == 0 { attrs, err := r.bucket.Attributes(ctx, r.file.RelPath) diff --git a/pkg/phlaredb/tsdb/index/index.go b/pkg/phlaredb/tsdb/index/index.go index 31081d97ed..d2eb1205eb 100644 --- a/pkg/phlaredb/tsdb/index/index.go +++ b/pkg/phlaredb/tsdb/index/index.go @@ -208,6 +208,10 @@ func NewTOCFromByteSlice(bs ByteSlice) (*TOC, error) { // NewWriter returns a new Writer to the given filename. It serializes data in format version 2. func NewWriter(ctx context.Context, fn string) (*Writer, error) { + return NewWriterSize(ctx, fn, 4<<20) +} + +func NewWriterSize(ctx context.Context, fn string, bufferSize int) (*Writer, error) { dir := filepath.Dir(fn) df, err := fileutil.OpenDir(dir) @@ -221,17 +225,17 @@ func NewWriter(ctx context.Context, fn string) (*Writer, error) { } // Main index file we are building. - f, err := NewFileWriter(fn) + f, err := NewFileWriter(fn, bufferSize) if err != nil { return nil, err } // Temporary file for postings. - fP, err := NewFileWriter(fn + "_tmp_p") + fP, err := NewFileWriter(fn+"_tmp_p", bufferSize) if err != nil { return nil, err } // Temporary file for posting offset table. - fPO, err := NewFileWriter(fn + "_tmp_po") + fPO, err := NewFileWriter(fn+"_tmp_po", bufferSize) if err != nil { return nil, err } @@ -247,8 +251,8 @@ func NewWriter(ctx context.Context, fn string) (*Writer, error) { stage: idxStageNone, // Reusable memory. 
- buf1: encoding.EncWrap(tsdb_enc.Encbuf{B: make([]byte, 0, 1<<22)}), - buf2: encoding.EncWrap(tsdb_enc.Encbuf{B: make([]byte, 0, 1<<22)}), + buf1: encoding.EncWrap(tsdb_enc.Encbuf{B: make([]byte, 0, bufferSize)}), + buf2: encoding.EncWrap(tsdb_enc.Encbuf{B: make([]byte, 0, bufferSize)}), symbolCache: make(map[string]symbolCacheEntry, 1<<8), labelNames: make(map[string]uint64, 1<<8), @@ -279,14 +283,14 @@ type FileWriter struct { name string } -func NewFileWriter(name string) (*FileWriter, error) { +func NewFileWriter(name string, bufferSize int) (*FileWriter, error) { f, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0o666) if err != nil { return nil, err } return &FileWriter{ f: f, - fbuf: bufio.NewWriterSize(f, 1<<22), + fbuf: bufio.NewWriterSize(f, bufferSize), pos: 0, name: name, }, nil @@ -1076,7 +1080,8 @@ func (w *Writer) writePostings() error { return err } // Don't need to calculate a checksum, so can copy directly. - n, err := io.CopyBuffer(w.f.fbuf, w.fP.f, make([]byte, 1<<20)) + buf := w.buf1.B[:cap(w.buf1.B)] + n, err := io.CopyBuffer(w.f.fbuf, w.fP.f, buf) if err != nil { return err } diff --git a/pkg/phlaredb/tsdb/index/index_test.go b/pkg/phlaredb/tsdb/index/index_test.go index 8f355fc6bc..2044b9443a 100644 --- a/pkg/phlaredb/tsdb/index/index_test.go +++ b/pkg/phlaredb/tsdb/index/index_test.go @@ -384,7 +384,7 @@ func TestPersistence_index_e2e(t *testing.T) { } } - var input indexWriterSeriesSlice + var input IndexWriterSeriesSlice // Generate ChunkMetas for every label set. for i, lset := range flbls { @@ -397,9 +397,9 @@ func TestPersistence_index_e2e(t *testing.T) { Checksum: rand.Uint32(), }) } - input = append(input, &indexWriterSeries{ - labels: lset, - chunks: metas, + input = append(input, &IndexWriterSeries{ + Labels: lset, + Chunks: metas, }) } @@ -424,11 +424,11 @@ func TestPersistence_index_e2e(t *testing.T) { mi := newMockIndex() for i, s := range input { - err = iw.AddSeries(storage.SeriesRef(i), s.labels, model.Fingerprint(s.labels.Hash()), s.chunks...) + err = iw.AddSeries(storage.SeriesRef(i), s.Labels, model.Fingerprint(s.Labels.Hash()), s.Chunks...) 
require.NoError(t, err) - require.NoError(t, mi.AddSeries(storage.SeriesRef(i), s.labels, s.chunks...)) + require.NoError(t, mi.AddSeries(storage.SeriesRef(i), s.Labels, s.Chunks...)) - for _, l := range s.labels { + for _, l := range s.Labels { valset, ok := values[l.Name] if !ok { valset = map[string]struct{}{} @@ -436,7 +436,7 @@ func TestPersistence_index_e2e(t *testing.T) { } valset[l.Value] = struct{}{} } - postings.Add(storage.SeriesRef(i), s.labels) + postings.Add(storage.SeriesRef(i), s.Labels) } err = iw.Close() diff --git a/pkg/phlaredb/tsdb/index/postings.go b/pkg/phlaredb/tsdb/index/postings.go index a63bfb7b7d..9af139deba 100644 --- a/pkg/phlaredb/tsdb/index/postings.go +++ b/pkg/phlaredb/tsdb/index/postings.go @@ -730,6 +730,10 @@ type bigEndianPostings struct { cur uint32 } +func NewBigEndianPostings(list []byte) Postings { + return newBigEndianPostings(list) +} + func newBigEndianPostings(list []byte) *bigEndianPostings { return &bigEndianPostings{list: list} } diff --git a/pkg/phlaredb/tsdb/index/test_utils.go b/pkg/phlaredb/tsdb/index/test_utils.go index 9c66ad34d2..d7b78d2437 100644 --- a/pkg/phlaredb/tsdb/index/test_utils.go +++ b/pkg/phlaredb/tsdb/index/test_utils.go @@ -4,16 +4,16 @@ import ( phlaremodel "github.com/grafana/pyroscope/pkg/model" ) -type indexWriterSeries struct { - labels phlaremodel.Labels - chunks []ChunkMeta // series file offset of chunks +type IndexWriterSeries struct { + Labels phlaremodel.Labels + Chunks []ChunkMeta // series file offset of chunks } -type indexWriterSeriesSlice []*indexWriterSeries +type IndexWriterSeriesSlice []*IndexWriterSeries -func (s indexWriterSeriesSlice) Len() int { return len(s) } -func (s indexWriterSeriesSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s IndexWriterSeriesSlice) Len() int { return len(s) } +func (s IndexWriterSeriesSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s indexWriterSeriesSlice) Less(i, j int) bool { - return phlaremodel.CompareLabelPairs(s[i].labels, s[j].labels) < 0 +func (s IndexWriterSeriesSlice) Less(i, j int) bool { + return phlaremodel.CompareLabelPairs(s[i].Labels, s[j].Labels) < 0 } diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 78977b9b2f..6f9bb2ddf6 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -34,7 +34,6 @@ import ( "github.com/grafana/pyroscope/api/gen/proto/go/vcs/v1/vcsv1connect" connectapi "github.com/grafana/pyroscope/pkg/api/connect" "github.com/grafana/pyroscope/pkg/clientpool" - "github.com/grafana/pyroscope/pkg/iter" phlaremodel "github.com/grafana/pyroscope/pkg/model" phlareobj "github.com/grafana/pyroscope/pkg/objstore" "github.com/grafana/pyroscope/pkg/phlaredb/bucketindex" @@ -975,7 +974,7 @@ func (q *Querier) SelectSeries(ctx context.Context, req *connect.Request[querier return nil, connect.NewError(connect.CodeInternal, err) } - result := rangeSeries(it, req.Msg.Start, req.Msg.End, stepMs, req.Msg.Aggregation) + result := phlaremodel.RangeSeries(it, req.Msg.Start, req.Msg.End, stepMs, req.Msg.Aggregation) if it.Err() != nil { return nil, connect.NewError(connect.CodeInternal, it.Err()) } @@ -1040,77 +1039,6 @@ func (q *Querier) selectSeries(ctx context.Context, req *connect.Request[querier return responses, nil } -// rangeSeries aggregates profiles into series. -// Series contains points spaced by step from start to end. -// Profiles from the same step are aggregated into one point. 
-func rangeSeries(it iter.Iterator[ProfileValue], start, end, step int64, aggregation *typesv1.TimeSeriesAggregationType) []*typesv1.Series { - defer it.Close() - seriesMap := make(map[uint64]*typesv1.Series) - aggregators := make(map[uint64]TimeSeriesAggregator) - - if !it.Next() { - return nil - } - - // advance from the start to the end, adding each step results to the map. -Outer: - for currentStep := start; currentStep <= end; currentStep += step { - for { - aggregator, ok := aggregators[it.At().LabelsHash] - if !ok { - aggregator = NewTimeSeriesAggregator(aggregation) - aggregators[it.At().LabelsHash] = aggregator - } - if it.At().Ts > currentStep { - if !aggregator.IsEmpty() { - series := seriesMap[it.At().LabelsHash] - series.Points = append(series.Points, aggregator.GetAndReset()) - } - break // no more profiles for the currentStep - } - // find or create series - series, ok := seriesMap[it.At().LabelsHash] - if !ok { - seriesMap[it.At().LabelsHash] = &typesv1.Series{ - Labels: it.At().Lbs, - Points: []*typesv1.Point{}, - } - aggregator.Add(currentStep, it.At().Value) - if !it.Next() { - break Outer - } - continue - } - // Aggregate point if it is in the current step. - if aggregator.GetTimestamp() == currentStep { - aggregator.Add(currentStep, it.At().Value) - if !it.Next() { - break Outer - } - continue - } - // Next step is missing - if !aggregator.IsEmpty() { - series.Points = append(series.Points, aggregator.GetAndReset()) - } - aggregator.Add(currentStep, it.At().Value) - if !it.Next() { - break Outer - } - } - } - for lblHash, aggregator := range aggregators { - if !aggregator.IsEmpty() { - seriesMap[lblHash].Points = append(seriesMap[lblHash].Points, aggregator.GetAndReset()) - } - } - series := lo.Values(seriesMap) - sort.Slice(series, func(i, j int) bool { - return phlaremodel.CompareLabelPairs(series[i].Labels, series[j].Labels) < 0 - }) - return series -} - func uniqueSortedStrings(responses []ResponseFromReplica[[]string]) []string { total := 0 for _, r := range responses { @@ -1186,87 +1114,3 @@ func (q *Querier) selectSpanProfile(ctx context.Context, req *querierv1.SelectMe storegatewayTree.Merge(ingesterTree) return storegatewayTree, nil } - -type TimeSeriesAggregator interface { - Add(ts int64, value float64) - GetAndReset() *typesv1.Point - IsEmpty() bool - GetTimestamp() int64 -} - -func NewTimeSeriesAggregator(aggregation *typesv1.TimeSeriesAggregationType) TimeSeriesAggregator { - if aggregation == nil { - return &sumTimeSeriesAggregator{ - ts: -1, - } - } - if *aggregation == typesv1.TimeSeriesAggregationType_TIME_SERIES_AGGREGATION_TYPE_AVERAGE { - return &avgTimeSeriesAggregator{ - ts: -1, - } - } - return &sumTimeSeriesAggregator{ - ts: -1, - } -} - -type sumTimeSeriesAggregator struct { - ts int64 - sum float64 -} - -func (a *sumTimeSeriesAggregator) Add(ts int64, value float64) { - a.ts = ts - a.sum += value -} - -func (a *sumTimeSeriesAggregator) GetAndReset() *typesv1.Point { - tsCopy := a.ts - sumCopy := a.sum - a.ts = -1 - a.sum = 0 - return &typesv1.Point{ - Timestamp: tsCopy, - Value: sumCopy, - } -} - -func (a *sumTimeSeriesAggregator) IsEmpty() bool { - return a.ts == -1 -} - -func (a *sumTimeSeriesAggregator) GetTimestamp() int64 { - return a.ts -} - -type avgTimeSeriesAggregator struct { - ts int64 - sum float64 - count int64 -} - -func (a *avgTimeSeriesAggregator) Add(ts int64, value float64) { - a.ts = ts - a.sum += value - a.count++ -} - -func (a *avgTimeSeriesAggregator) GetAndReset() *typesv1.Point { - avg := a.sum / float64(a.count) - 
tsCopy := a.ts - a.ts = -1 - a.sum = 0 - a.count = 0 - return &typesv1.Point{ - Timestamp: tsCopy, - Value: avg, - } -} - -func (a *avgTimeSeriesAggregator) IsEmpty() bool { - return a.ts == -1 -} - -func (a *avgTimeSeriesAggregator) GetTimestamp() int64 { - return a.ts -} diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 0183acbc8f..0e5e593e7f 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -29,7 +29,6 @@ import ( querierv1 "github.com/grafana/pyroscope/api/gen/proto/go/querier/v1" typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" "github.com/grafana/pyroscope/pkg/clientpool" - "github.com/grafana/pyroscope/pkg/iter" phlaremodel "github.com/grafana/pyroscope/pkg/model" objstoreclient "github.com/grafana/pyroscope/pkg/objstore/client" "github.com/grafana/pyroscope/pkg/objstore/providers/filesystem" @@ -1005,147 +1004,6 @@ func (f *fakeQuerierIngester) MergeProfilesPprof(ctx context.Context) clientpool return res } -func Test_RangeSeriesSum(t *testing.T) { - for _, tc := range []struct { - name string - in []ProfileValue - out []*typesv1.Series - }{ - { - name: "single series", - in: []ProfileValue{ - {Ts: 1, Value: 1}, - {Ts: 1, Value: 1}, - {Ts: 2, Value: 2}, - {Ts: 3, Value: 3}, - {Ts: 4, Value: 4}, - {Ts: 5, Value: 5}, - }, - out: []*typesv1.Series{ - { - Points: []*typesv1.Point{ - {Timestamp: 1, Value: 2}, - {Timestamp: 2, Value: 2}, - {Timestamp: 3, Value: 3}, - {Timestamp: 4, Value: 4}, - {Timestamp: 5, Value: 5}, - }, - }, - }, - }, - { - name: "multiple series", - in: []ProfileValue{ - {Ts: 1, Value: 1, Lbs: foobarlabels, LabelsHash: foobarlabels.Hash()}, - {Ts: 1, Value: 1, Lbs: foobuzzlabels, LabelsHash: foobuzzlabels.Hash()}, - {Ts: 2, Value: 1, Lbs: foobarlabels, LabelsHash: foobarlabels.Hash()}, - {Ts: 3, Value: 1, Lbs: foobuzzlabels, LabelsHash: foobuzzlabels.Hash()}, - {Ts: 3, Value: 1, Lbs: foobuzzlabels, LabelsHash: foobuzzlabels.Hash()}, - {Ts: 4, Value: 4, Lbs: foobuzzlabels, LabelsHash: foobuzzlabels.Hash()}, - {Ts: 4, Value: 4, Lbs: foobuzzlabels, LabelsHash: foobuzzlabels.Hash()}, - {Ts: 4, Value: 4, Lbs: foobarlabels, LabelsHash: foobarlabels.Hash()}, - {Ts: 5, Value: 5, Lbs: foobarlabels, LabelsHash: foobarlabels.Hash()}, - }, - out: []*typesv1.Series{ - { - Labels: foobarlabels, - Points: []*typesv1.Point{ - {Timestamp: 1, Value: 1}, - {Timestamp: 2, Value: 1}, - {Timestamp: 4, Value: 4}, - {Timestamp: 5, Value: 5}, - }, - }, - { - Labels: foobuzzlabels, - Points: []*typesv1.Point{ - {Timestamp: 1, Value: 1}, - {Timestamp: 3, Value: 2}, - {Timestamp: 4, Value: 8}, - }, - }, - }, - }, - } { - t.Run(tc.name, func(t *testing.T) { - in := iter.NewSliceIterator(tc.in) - out := rangeSeries(in, 1, 5, 1, nil) - testhelper.EqualProto(t, tc.out, out) - }) - } -} - -func Test_RangeSeriesAvg(t *testing.T) { - for _, tc := range []struct { - name string - in []ProfileValue - out []*typesv1.Series - }{ - { - name: "single series", - in: []ProfileValue{ - {Ts: 1, Value: 1}, - {Ts: 1, Value: 2}, - {Ts: 2, Value: 2}, - {Ts: 2, Value: 3}, - {Ts: 3, Value: 4}, - {Ts: 4, Value: 5}, - }, - out: []*typesv1.Series{ - { - Points: []*typesv1.Point{ - {Timestamp: 1, Value: 1.5}, // avg of 1 and 2 - {Timestamp: 2, Value: 2.5}, // avg of 2 and 3 - {Timestamp: 3, Value: 4}, - {Timestamp: 4, Value: 5}, - }, - }, - }, - }, - { - name: "multiple series", - in: []ProfileValue{ - {Ts: 1, Value: 1, Lbs: foobarlabels, LabelsHash: foobarlabels.Hash()}, - {Ts: 1, Value: 1, Lbs: foobuzzlabels, LabelsHash: foobuzzlabels.Hash()}, - 
{Ts: 2, Value: 1, Lbs: foobarlabels, LabelsHash: foobarlabels.Hash()}, - {Ts: 2, Value: 2, Lbs: foobarlabels, LabelsHash: foobarlabels.Hash()}, - {Ts: 3, Value: 1, Lbs: foobuzzlabels, LabelsHash: foobuzzlabels.Hash()}, - {Ts: 3, Value: 2, Lbs: foobuzzlabels, LabelsHash: foobuzzlabels.Hash()}, - {Ts: 4, Value: 4, Lbs: foobuzzlabels, LabelsHash: foobuzzlabels.Hash()}, - {Ts: 4, Value: 6, Lbs: foobuzzlabels, LabelsHash: foobuzzlabels.Hash()}, - {Ts: 4, Value: 4, Lbs: foobarlabels, LabelsHash: foobarlabels.Hash()}, - {Ts: 5, Value: 5, Lbs: foobarlabels, LabelsHash: foobarlabels.Hash()}, - }, - out: []*typesv1.Series{ - { - Labels: foobarlabels, - Points: []*typesv1.Point{ - {Timestamp: 1, Value: 1}, - {Timestamp: 2, Value: 1.5}, // avg of 1 and 2 - {Timestamp: 4, Value: 4}, - {Timestamp: 5, Value: 5}, - }, - }, - { - Labels: foobuzzlabels, - Points: []*typesv1.Point{ - {Timestamp: 1, Value: 1}, - {Timestamp: 3, Value: 1.5}, // avg of 1 and 2 - {Timestamp: 4, Value: 5}, // avg of 4 and 6 - }, - }, - }, - }, - } { - t.Run(tc.name, func(t *testing.T) { - in := iter.NewSliceIterator(tc.in) - aggregation := typesv1.TimeSeriesAggregationType_TIME_SERIES_AGGREGATION_TYPE_AVERAGE - out := rangeSeries(in, 1, 5, 1, &aggregation) - testhelper.EqualProto(t, tc.out, out) - }) - } -} - func Test_splitQueryToStores(t *testing.T) { for _, tc := range []struct { name string diff --git a/pkg/querier/select_merge.go b/pkg/querier/select_merge.go index e4bb2a48c6..4460342638 100644 --- a/pkg/querier/select_merge.go +++ b/pkg/querier/select_merge.go @@ -10,7 +10,6 @@ import ( "github.com/grafana/dskit/multierror" "github.com/opentracing/opentracing-go" otlog "github.com/opentracing/opentracing-go/log" - "github.com/prometheus/common/model" "github.com/samber/lo" "golang.org/x/sync/errgroup" @@ -463,23 +462,8 @@ func selectMergePprofProfile(ctx context.Context, ty *typesv1.ProfileType, respo return p, nil } -type ProfileValue struct { - Ts int64 - Lbs []*typesv1.LabelPair - LabelsHash uint64 - Value float64 -} - -func (p ProfileValue) Labels() phlaremodel.Labels { - return p.Lbs -} - -func (p ProfileValue) Timestamp() model.Time { - return model.Time(p.Ts) -} - // selectMergeSeries selects the profile from each ingester by deduping them and request merges of total values. -func selectMergeSeries(ctx context.Context, aggregation *typesv1.TimeSeriesAggregationType, responses []ResponseFromReplica[clientpool.BidiClientMergeProfilesLabels]) (iter.Iterator[ProfileValue], error) { +func selectMergeSeries(ctx context.Context, aggregation *typesv1.TimeSeriesAggregationType, responses []ResponseFromReplica[clientpool.BidiClientMergeProfilesLabels]) (iter.Iterator[phlaremodel.TimeSeriesValue], error) { mergeResults := make([]MergeResult[[]*typesv1.Series], len(responses)) iters := make([]MergeIterator, len(responses)) var wg sync.WaitGroup @@ -524,12 +508,12 @@ func selectMergeSeries(ctx context.Context, aggregation *typesv1.TimeSeriesAggre } var series = phlaremodel.MergeSeries(aggregation, results...) 
- seriesIters := make([]iter.Iterator[ProfileValue], 0, len(series)) + seriesIters := make([]iter.Iterator[phlaremodel.TimeSeriesValue], 0, len(series)) for _, s := range series { s := s - seriesIters = append(seriesIters, newSeriesIterator(s.Labels, s.Points)) + seriesIters = append(seriesIters, phlaremodel.NewSeriesIterator(s.Labels, s.Points)) } - return iter.NewMergeIterator(ProfileValue{Ts: math.MaxInt64}, false, seriesIters...), nil + return phlaremodel.NewMergeIterator(phlaremodel.TimeSeriesValue{Ts: math.MaxInt64}, false, seriesIters...), nil } // selectMergeSpanProfile selects the profile from each ingester by deduping them and @@ -581,43 +565,3 @@ func selectMergeSpanProfile(ctx context.Context, responses []ResponseFromReplica span.LogFields(otlog.String("msg", "building tree")) return m.Tree(), nil } - -type seriesIterator struct { - point []*typesv1.Point - - curr ProfileValue -} - -func newSeriesIterator(lbs []*typesv1.LabelPair, points []*typesv1.Point) *seriesIterator { - return &seriesIterator{ - point: points, - - curr: ProfileValue{ - Lbs: lbs, - LabelsHash: phlaremodel.Labels(lbs).Hash(), - }, - } -} - -func (s *seriesIterator) Next() bool { - if len(s.point) == 0 { - return false - } - p := s.point[0] - s.point = s.point[1:] - s.curr.Ts = p.Timestamp - s.curr.Value = p.Value - return true -} - -func (s *seriesIterator) At() ProfileValue { - return s.curr -} - -func (s *seriesIterator) Err() error { - return nil -} - -func (s *seriesIterator) Close() error { - return nil -} diff --git a/pkg/querier/select_merge_test.go b/pkg/querier/select_merge_test.go index 6431a4ceac..12ab1fd6a2 100644 --- a/pkg/querier/select_merge_test.go +++ b/pkg/querier/select_merge_test.go @@ -17,10 +17,7 @@ import ( "github.com/grafana/pyroscope/pkg/testhelper" ) -var ( - foobarlabels = phlaremodel.Labels([]*typesv1.LabelPair{{Name: "foo", Value: "bar"}}) - foobuzzlabels = phlaremodel.Labels([]*typesv1.LabelPair{{Name: "foo", Value: "buzz"}}) -) +var foobarlabels = phlaremodel.Labels([]*typesv1.LabelPair{{Name: "foo", Value: "bar"}}) func TestSelectMergeStacktraces(t *testing.T) { resp1 := newFakeBidiClientStacktraces([]*ingestv1.ProfileSets{ @@ -202,7 +199,7 @@ func TestSelectMergeByLabels(t *testing.T) { }) values, err := iter.Slice(res) require.NoError(t, err) - require.Equal(t, []ProfileValue{ + require.Equal(t, []phlaremodel.TimeSeriesValue{ {Ts: 1, Value: 1.0, Lbs: foobarlabels, LabelsHash: foobarlabels.Hash()}, {Ts: 2, Value: 2.0, Lbs: foobarlabels, LabelsHash: foobarlabels.Hash()}, {Ts: 3, Value: 3.0, Lbs: foobarlabels, LabelsHash: foobarlabels.Hash()}, diff --git a/pkg/util/bufferpool/pool.go b/pkg/util/bufferpool/pool.go new file mode 100644 index 0000000000..b4fa477460 --- /dev/null +++ b/pkg/util/bufferpool/pool.go @@ -0,0 +1,98 @@ +package bufferpool + +import ( + "bytes" + "io" + "sync" +) + +// Sized *bytes.Buffer pools: from 2^9 (512b) to 2^30 (1GB). +var pools [maxPool]sync.Pool + +type Buffer struct { + B []byte + p int64 +} + +const ( + minBits = 9 + maxPool = 22 +) + +// GetBuffer returns a buffer from the pool, or creates a new one. +// The returned buffer has at least the requested capacity. +func GetBuffer(size int) *Buffer { + i := poolIndex(size) + if i < 0 { + return &Buffer{B: make([]byte, 0, size)} + } + x := pools[i].Get() + if x != nil { + return x.(*Buffer) + } + c := 2 << (minBits + i - 1) + c += bytes.MinRead + return &Buffer{ + B: make([]byte, 0, c), + p: i, + } +} + +// Put places the buffer into the pool. 
+func Put(b *Buffer) {
+	if b == nil {
+		return
+	}
+	if p := returnPool(cap(b.B), b.p); p > 0 {
+		b.B = b.B[:0]
+		pools[p].Put(b)
+	}
+}
+
+func returnPool(c int, p int64) int64 {
+	// Empty buffers are ignored.
+	if c == 0 {
+		return -1
+	}
+	i := poolIndex(c)
+	if p == 0 {
+		// The buffer does not belong to any pool, or it's
+		// of the smallest size. We pick the pool based on
+		// its current capacity.
+		return i
+	}
+	d := i - p
+	if d < 0 {
+		// This buffer was likely obtained outside the pool:
+		// for example, an empty one, or one created with a
+		// pre-allocated byte slice.
+		return i
+	}
+	if d > 1 {
+		// Relocate the buffer if its capacity has grown
+		// by more than a power of two.
+		return i
+	}
+	// Otherwise, keep the buffer in the current pool.
+	return p
+}
+
+func poolIndex(n int) (i int64) {
+	n--
+	n >>= minBits
+	for n > 0 {
+		n >>= 1
+		i++
+	}
+	if i >= maxPool {
+		return -1
+	}
+	return i
+}
+
+func (b *Buffer) ReadFrom(r io.Reader) (int64, error) {
+	buf := bytes.NewBuffer(b.B)
+	n, err := buf.ReadFrom(r)
+	b.B = buf.Bytes()
+	return n, err
+}
diff --git a/pkg/util/bufferpool/pool_test.go b/pkg/util/bufferpool/pool_test.go
new file mode 100644
index 0000000000..5097915208
--- /dev/null
+++ b/pkg/util/bufferpool/pool_test.go
@@ -0,0 +1,22 @@
+package bufferpool
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func Test_returnPool(t *testing.T) {
+	assert.EqualValues(t, 0, returnPool(512, 0)) // Buffers can be added to the pool.
+	assert.EqualValues(t, 1, returnPool(513, 0))
+	assert.EqualValues(t, 1, returnPool(1<<10, 0))
+	assert.EqualValues(t, -1, returnPool(0, 0))  // Empty buffers are ignored.
+	assert.EqualValues(t, -1, returnPool(0, 10)) //
+	assert.EqualValues(t, 5, returnPool(1<<14, 0)) // New buffers are added to the appropriate pool.
+	assert.EqualValues(t, 5, returnPool(1<<14, 3)) // Buffers whose capacity exceeds the next power of two are relocated.
+	assert.EqualValues(t, 4, returnPool(1<<14, 4)) // Buffers whose capacity does not exceed the next power of two are retained.
+	assert.EqualValues(t, 5, returnPool(1<<14, 5)) // Buffer of the nominal capacity.
+	assert.EqualValues(t, 5, returnPool(1<<14, 6)) // Buffers of a smaller capacity are relocated.
+	assert.EqualValues(t, 21, returnPool(1<<30, 13))
+	assert.EqualValues(t, -1, returnPool(1<<30+1, 13)) // No pools for buffers larger than 1GB (2^30 bytes).
+} diff --git a/pkg/util/health/health.go b/pkg/util/health/health.go new file mode 100644 index 0000000000..675596e1bf --- /dev/null +++ b/pkg/util/health/health.go @@ -0,0 +1,33 @@ +package health + +import ( + "github.com/grafana/dskit/services" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" +) + +type Service interface { + SetServingStatus(string, grpc_health_v1.HealthCheckResponse_ServingStatus) +} + +type noopService struct{} + +var NoOpService = noopService{} + +func (noopService) SetServingStatus(string, grpc_health_v1.HealthCheckResponse_ServingStatus) {} + +func NewGRPCHealthService() *GRPCHealthService { + s := health.NewServer() + return &GRPCHealthService{ + Server: s, + Service: services.NewIdleService(nil, func(error) error { + s.Shutdown() + return nil + }), + } +} + +type GRPCHealthService struct { + services.Service + *health.Server +} diff --git a/pkg/util/http.go b/pkg/util/http.go index 43920d4798..3149f5df42 100644 --- a/pkg/util/http.go +++ b/pkg/util/http.go @@ -13,12 +13,13 @@ import ( "strings" "time" + "github.com/grafana/dskit/instrument" + "github.com/dustin/go-humanize" "github.com/felixge/httpsnoop" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/gorilla/mux" - "github.com/grafana/dskit/instrument" dslog "github.com/grafana/dskit/log" "github.com/grafana/dskit/middleware" "github.com/grafana/dskit/multierror" diff --git a/pkg/util/interceptor.go b/pkg/util/interceptor.go index c4437a1d93..e65f386e3c 100644 --- a/pkg/util/interceptor.go +++ b/pkg/util/interceptor.go @@ -68,7 +68,6 @@ func NewLogInterceptor(logger log.Logger) connect.UnaryInterceptorFunc { "route", req.Spec().Procedure, "tenant", tenantID, "traceID", traceID, - "parameters", req.Any(), "duration", time.Since(begin), ) }() diff --git a/pkg/util/recovery.go b/pkg/util/recovery.go index bcd7905e3b..7884663829 100644 --- a/pkg/util/recovery.go +++ b/pkg/util/recovery.go @@ -29,7 +29,7 @@ var ( return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { defer func() { if p := recover(); p != nil { - httputil.Error(w, httpgrpc.Errorf(http.StatusInternalServerError, "error while processing request: %v", panicError(p))) + httputil.Error(w, httpgrpc.Errorf(http.StatusInternalServerError, "error while processing request: %v", PanicError(p))) } }() next.ServeHTTP(w, req) @@ -39,7 +39,7 @@ var ( RecoveryInterceptor recoveryInterceptor ) -func panicError(p interface{}) error { +func PanicError(p interface{}) error { stack := make([]byte, maxStacksize) stack = stack[:runtime.Stack(stack, true)] // keep a multiline stack @@ -53,7 +53,7 @@ func RecoverPanic(f func() error) func() error { return func() (err error) { defer func() { if p := recover(); p != nil { - err = panicError(p) + err = PanicError(p) } }() return f() @@ -66,7 +66,7 @@ func (recoveryInterceptor) WrapUnary(next connect.UnaryFunc) connect.UnaryFunc { return func(ctx context.Context, req connect.AnyRequest) (resp connect.AnyResponse, err error) { defer func() { if p := recover(); p != nil { - err = connect.NewError(connect.CodeInternal, panicError(p)) + err = connect.NewError(connect.CodeInternal, PanicError(p)) } }() return next(ctx, req) @@ -77,7 +77,7 @@ func (recoveryInterceptor) WrapStreamingHandler(next connect.StreamingHandlerFun return func(ctx context.Context, conn connect.StreamingHandlerConn) (err error) { defer func() { if p := recover(); p != nil { - err = connect.NewError(connect.CodeInternal, panicError(p)) + err = connect.NewError(connect.CodeInternal, 
PanicError(p))
 			}
 		}()
 		return next(ctx, conn)
diff --git a/pkg/util/refctr/refctr.go b/pkg/util/refctr/refctr.go
index 7bfcba35ee..df2c541232 100644
--- a/pkg/util/refctr/refctr.go
+++ b/pkg/util/refctr/refctr.go
@@ -3,8 +3,9 @@ package refctr
 import "sync"
 
 type Counter struct {
-	m sync.Mutex
-	c int
+	m   sync.Mutex
+	c   int
+	err error
 }
 
 // Inc increments the counter and calls the init function,
@@ -30,15 +31,43 @@ func (r *Counter) Inc(init func() error) (err error) {
 	return init()
 }
 
+// IncErr is identical to Inc, except that if the init function fails,
+// the error is remembered and returned on any further IncErr call,
+// preventing the faulty initialization function from being called again.
+func (r *Counter) IncErr(init func() error) (err error) {
+	r.m.Lock()
+	if r.err != nil {
+		err = r.err
+		r.m.Unlock()
+		return err
+	}
+	defer func() {
+		// If initialization fails, roll back the reference count
+		// and remember the error, so that subsequent calls fail
+		// fast instead of retrying.
+		if err != nil {
+			r.err = err
+			r.c--
+		}
+		r.m.Unlock()
+	}()
+	if r.c++; r.c > 1 {
+		return nil
+	}
+	// The mutex is held during the call in order to serialize
+	// access to the resources, so that subsequent callers only
+	// have access to them after initialization finishes.
+	return init()
+}
+
 // Dec decrements the counter and calls the release function,
 // if this is the last reference.
 func (r *Counter) Dec(release func()) {
 	r.m.Lock()
+	defer r.m.Unlock()
 	if r.c < 0 {
 		panic("bug: negative reference counter")
 	}
 	if r.c--; r.c < 1 {
 		release()
 	}
-	r.m.Unlock()
 }
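
The refctr.Counter change above adds error memoization to reference-counted initialization: IncErr runs init only for the first holder, remembers a failure, and fails fast on later calls, while Dec releases the resource when the last holder leaves. The following standalone sketch is not part of the patch; the lazyFile type, its fields, and the file path are illustrative assumptions used only to show how IncErr and Dec are intended to pair around a lazily initialized, shared resource.

package main

import (
	"fmt"
	"os"

	"github.com/grafana/pyroscope/pkg/util/refctr"
)

// lazyFile is a hypothetical resource guarded by a refctr.Counter:
// the file is opened on the first acquisition and closed when the
// last holder releases it. A failed open is remembered by IncErr,
// so later acquisitions return the same error without retrying.
type lazyFile struct {
	refs refctr.Counter
	f    *os.File
	path string
}

func (l *lazyFile) acquire() error {
	return l.refs.IncErr(func() (err error) {
		l.f, err = os.Open(l.path)
		return err
	})
}

func (l *lazyFile) release() {
	l.refs.Dec(func() {
		_ = l.f.Close()
	})
}

func main() {
	lf := &lazyFile{path: "/etc/hostname"} // illustrative path
	if err := lf.acquire(); err != nil {
		fmt.Println("open failed:", err)
		return
	}
	defer lf.release()
	fmt.Println("file open:", lf.f.Name())
}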